/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
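  // Illustrative (not exhaustive) Java shapes that produce such macro nodes:
  // 'new Foo()' and 'new int[n]' become Allocate/AllocateArray nodes,
  // 'synchronized (obj) { ... }' becomes Lock/Unlock nodes, and boxing such
  // as 'Integer.valueOf(i)' is a CallStaticJava boxing method call.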
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis.
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on the ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and Java static call results are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
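  //    (Roughly: iterate to a fixed point, propagating points-to sets and
  //    escape states along graph edges; the call below returns false when
  //    all objects escape or the time/iteration limits are hit.)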
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
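  //    (Based on the worklists collected in step 1, this rewrites CmpP/CmpN
  //    nodes whose outcome EA can prove and eliminates MemBarStoreStore
  //    barriers associated with non-escaping allocations; see
  //    optimize_ideal_graph().)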
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Remove reducible allocation merges from ideal graph
  if (reducible_merges.size() > 0) {
    bool delay = _igvn->delay_transform();
    _igvn->set_delay_transform(true);
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      reduce_phi(n->as_Phi());
      if (C->failing()) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        return false;
      }
    }
    _igvn->set_delay_transform(delay);
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter.
// Returns true if at least one scalar replaceable allocation participates
// in the merge and no input to the Phi is nullable.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  // Check if there is a scalar replaceable allocate in the Phi.
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    // Right now we can't restore a "null" pointer during deoptimization.
    const Type* inp_t = _igvn->type(ophi->in(i));
    if (inp_t == nullptr || inp_t->make_oopptr() == nullptr || inp_t->make_oopptr()->maybe_null()) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Input %d is nullable.", ophi->_idx, _invocation, i);)
      return false;
    }

    // We are looking for at least one SR object in the merge.
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      assert(ptn->ideal_node() != nullptr && ptn->ideal_node()->is_Allocate(), "sanity");
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// Check if we are able to untangle the merge. Right now we only reduce Phis
// which are only used as debug information.
bool ConnectionGraph::can_reduce_phi_check_users(PhiNode* ophi) const {
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(ophi)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", ophi->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", ophi->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", ophi->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_phi_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // (ReduceAllocationMerges) disabled.
  // If EliminateAllocations is false, there is no point in reducing merges.
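  //
  // An illustrative (assumed, not from this file) Java shape of a reducible
  // merge: both inputs are known non-null allocations and the Phi is kept
  // alive only by debug info at safepoints:
  //
  //   Point p = cond ? new Point(1) : new Point(2);
  //   // ... 'p' only referenced by deoptimization state afterwards ...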
  if (!_compile->do_reduce_allocation_merges()) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr || phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_instptr() == nullptr ||
      !phi_t->make_ptr()->isa_instptr()->klass_is_exact()) {
    NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can NOT reduce Phi %d during invocation %d because it's nullable.", ophi->_idx, _invocation); })
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_phi_check_users(ophi)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

void ConnectionGraph::reduce_phi_on_field_access(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

#ifdef ASSERT
  if (VerifyReduceAllocationMerges && !can_reduce_phi(ophi)) {
    TraceReduceAllocationMerges = true;
    ophi->dump(2);
    ophi->dump(-2);
    assert(can_reduce_phi(ophi), "Sanity: previous reducible Phi is no longer reducible inside reduce_phi_on_field_access.");
  }
#endif

  // Iterate over Phi outputs looking for an AddP
  for (int j = ophi->outcnt()-1; j >= 0;) {
    Node* previous_addp = ophi->raw_out(j);
    if (previous_addp->is_AddP()) {
      // All AddPs are present in the connection graph
      FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

      // Iterate over AddP looking for a Load
      for (int k = previous_addp->outcnt()-1; k >= 0;) {
        Node* previous_load = previous_addp->raw_out(k);
        if (previous_load->is_Load()) {
          Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);
          _igvn->replace_node(previous_load, data_phi);
          assert(data_phi != nullptr, "Output of split_through_phi is null.");
          assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
          assert(data_phi->is_Phi(), "Return of split_through_phi should be a Phi.");

          // Push the newly created AddP on alloc_worklist and patch
          // the connection graph. Note that the changes in the CG below
          // won't affect the ES of objects since the new nodes have the
          // same status as the old ones.
          for (uint i = 1; i < data_phi->req(); i++) {
            Node* new_load = data_phi->in(i);
            if (new_load->is_Load()) {
              Node* new_addp = new_load->in(MemNode::Address);
              Node* base = get_addp_base(new_addp);

              // The base might not be something that we can create a unique
              // type for. If that's the case we are done with that input.
              PointsToNode* jobj_ptn = unique_java_object(base);
              if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
                continue;
              }

              // Push to alloc_worklist since the base has a unique type.
              alloc_worklist.append_if_missing(new_addp);

              // Now let's add the node to the connection graph.
              _nodes.at_grow(new_addp->_idx, nullptr);
              add_field(new_addp, fn->escape_state(), fn->offset());
              add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

              // If the load doesn't load an object then it won't be
              // part of the connection graph.
              PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
              if (curr_load_ptn != nullptr) {
                _nodes.at_grow(new_load->_idx, nullptr);
                add_local_var(new_load, curr_load_ptn->escape_state());
                add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
              }
            }
          }
        }
        k = MIN2(--k, (int)previous_addp->outcnt()-1);
      }

      // Remove the old AddP from the processing list because it's dead now.
      alloc_worklist.remove_if_existing(previous_addp);
      _igvn->remove_globally_dead_node(previous_addp);
    }
    j = MIN2(--j, (int)ophi->outcnt()-1);
  }

#ifdef ASSERT
  if (VerifyReduceAllocationMerges) {
    for (uint j = 0; j < ophi->outcnt(); j++) {
      Node* use = ophi->raw_out(j);
      if (!use->is_SafePoint()) {
        ophi->dump(2);
        ophi->dump(-2);
        assert(false, "Should be a SafePoint.");
      }
    }
  }
#endif
}

// This method will create a SafePointScalarObjectNode for each combination of
// scalar replaceable allocation in 'ophi' and SafePoint node in 'safepoints'.
// The method will create a SafePointScalarMergeNode for each combination of
// 'ophi' and SafePoint node in 'safepoints'.
// Each SafePointScalarMergeNode created here may describe multiple scalar
// replaced objects - check the detailed description in the
// SafePointScalarMergeNode class header.
//
// This method will set entries in the Phi that are scalar replaceable to 'null'.
void ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi, Unique_Node_List* safepoints) {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  uint number_of_sr_objects = 0;
  PhaseMacroExpand mexp(*_igvn);

  _igvn->hash_delete(ophi);

  // Fill in the 'selector' Phi. If index 'i' of the selector is:
  // -> a '-1' constant, the i'th input of the original Phi is NSR.
  // -> a constant 'x' >= 0, the i'th input of the original Phi will be SR and
  //    the info about the scalarized object will be at index x of
  //    ObjectMergeValue::possible_objects
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  // Update the debug information of all safepoints in turn.
  for (uint spi = 0; spi < safepoints->size(); spi++) {
    SafePointNode* sfpt = safepoints->at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode.
    sfpt->add_req(ophi);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register
      // information about it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj".
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation.
      smerge->add_req(sobj);
    }

    // Replace debug information references to "ophi" in "sfpt" with references to "smerge".
    sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), ophi);
    _igvn->_worklist.push(sfpt);
  }

  // Now we can change ophi since we don't need to know the types
  // of the input allocations anymore.
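  // (Each scalar replaceable input is replaced by null below, so the new
  // Phi's type must be widened to admit null: the meet with TypePtr::NULL_PTR.)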
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  _igvn->replace_node(ophi, new_phi);
  _igvn->hash_insert(ophi);
  _igvn->_worklist.push(ophi);
}

void ConnectionGraph::reduce_phi(PhiNode* ophi) {
  Unique_Node_List safepoints;

  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);

    // All SafePoint nodes using the same Phi node use the same debug
    // information (regarding the Phi). Furthermore, reducing the Phi used by a
    // SafePoint requires changing the Phi. Therefore, I collect all safepoints
    // and patch them all at once later.
    if (use->is_SafePoint()) {
      safepoints.push(use->as_SafePoint());
    } else {
#ifdef ASSERT
      ophi->dump(-3);
      assert(false, "Unexpected user of reducible Phi %d -> %d:%s", ophi->_idx, use->_idx, use->Name());
#endif
      _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
      return;
    }
  }

  if (safepoints.size() > 0) {
    reduce_phi_on_safepoints(ophi, &safepoints);
  }
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  if (!C->do_reduce_allocation_merges()) return;

  Unique_Node_List ideal_nodes;
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals.
      // Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some inputs
      // may not be defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      PointsToNode* ptn_con = add_java_object(n, es);
      set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != nullptr) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some inputs
        // may not be defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_VectorizedHashCode:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
      set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during first iteration because some inputs
      // may not be defined yet; defer to the final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != nullptr, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
             n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_VectorizedHashCode:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != nullptr, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != nullptr, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == nullptr) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == nullptr) {
    n->dump(1);
    assert(adr_type != nullptr, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for calls which return a pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ?
          ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if a non-escaping object
    //      allocated during the call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == nullptr) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
      if (es == PointsToNode::GlobalEscape) {
        set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
      }
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != nullptr &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == nullptr) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue;
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or the normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != nullptr, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
1567 // 1568 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 1569 arg_has_oops && (i > TypeFunc::Parms); 1570 #ifdef ASSERT 1571 if (!(is_arraycopy || 1572 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 1573 (call->as_CallLeaf()->_name != nullptr && 1574 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 1575 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 1576 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 1577 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 1578 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 1579 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 1580 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 1581 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 1582 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 1583 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 1584 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 || 1585 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 || 1586 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 1587 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 || 1588 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 1589 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 || 1590 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 || 1591 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 1592 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 1593 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 1594 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 1595 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 1596 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 1597 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 1598 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 || 1599 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 || 1600 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 1601 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 1602 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 1603 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 1604 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 1605 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 1606 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 1607 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 1608 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || 1609 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || 1610 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0) 1611 ))) { 1612 call->dump(); 1613 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 1614 } 1615 #endif 1616 // Always process arraycopy's destination object since 1617 // we need to add all possible edges to references in 1618 // source object. 
if (arg_esc >= PointsToNode::ArgEscape &&
1620 !arg_is_arraycopy_dest) {
1621 continue;
1622 }
1623 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1624 if (call->is_ArrayCopy()) {
1625 ArrayCopyNode* ac = call->as_ArrayCopy();
1626 if (ac->is_clonebasic() ||
1627 ac->is_arraycopy_validated() ||
1628 ac->is_copyof_validated() ||
1629 ac->is_copyofrange_validated()) {
1630 es = PointsToNode::NoEscape;
1631 }
1632 }
1633 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1634 if (arg_is_arraycopy_dest) {
1635 Node* src = call->in(TypeFunc::Parms);
1636 if (src->is_AddP()) {
1637 src = get_addp_base(src);
1638 }
1639 PointsToNode* src_ptn = ptnode_adr(src->_idx);
1640 assert(src_ptn != nullptr, "should be registered");
1641 if (arg_ptn != src_ptn) {
1642 // Special arraycopy edge:
1643 // A destination object's field can't have the source object
1644 // as base since the objects' escape states are not related.
1645 // Only the escape state of the destination object's fields affects
1646 // the escape state of fields in the source object.
1647 add_arraycopy(call, es, src_ptn, arg_ptn);
1648 }
1649 }
1650 }
1651 }
1652 break;
1653 }
1654 case Op_CallStaticJava: {
1655 // For a static call, we know exactly what method is being called.
1656 // Use bytecode estimator to record the call's escape effects
1657 #ifdef ASSERT
1658 const char* name = call->as_CallStaticJava()->_name;
1659 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1660 #endif
1661 ciMethod* meth = call->as_CallJava()->method();
1662 if ((meth != nullptr) && meth->is_boxing_method()) {
1663 break; // Boxing methods do not modify any oops.
1664 }
1665 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1666 // fall-through if not a Java method or no analyzer information
1667 if (call_analyzer != nullptr) {
1668 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1669 const TypeTuple* d = call->tf()->domain();
1670 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1671 const Type* at = d->field_at(i);
1672 int k = i - TypeFunc::Parms;
1673 Node* arg = call->in(i);
1674 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1675 if (at->isa_ptr() != nullptr &&
1676 call_analyzer->is_arg_returned(k)) {
1677 // The call returns arguments.
1678 if (call_ptn != nullptr) { // Is call's result used?
1679 assert(call_ptn->is_LocalVar(), "node should be registered");
1680 assert(arg_ptn != nullptr, "node should be registered");
1681 add_edge(call_ptn, arg_ptn);
1682 }
1683 }
1684 if (at->isa_oopptr() != nullptr &&
1685 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1686 if (!call_analyzer->is_arg_stack(k)) {
1687 // The argument globally escapes
1688 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1689 } else {
1690 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1691 if (!call_analyzer->is_arg_local(k)) {
1692 // The argument itself doesn't escape, but any fields might
1693 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1694 }
1695 }
1696 }
1697 }
1698 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1699 // The call returns arguments.
1700 assert(call_ptn->edge_count() > 0, "sanity");
1701 if (!call_analyzer->is_return_local()) {
1702 // It also returns an unknown object.
add_edge(call_ptn, phantom_obj);
1704 }
1705 }
1706 break;
1707 }
1708 }
1709 default: {
1710 // Fall-through here if not a Java method or no analyzer information,
1711 // or for some other type of call; assume the worst case: all arguments
1712 // globally escape.
1713 const TypeTuple* d = call->tf()->domain();
1714 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1715 const Type* at = d->field_at(i);
1716 if (at->isa_oopptr() != nullptr) {
1717 Node* arg = call->in(i);
1718 if (arg->is_AddP()) {
1719 arg = get_addp_base(arg);
1720 }
1721 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1722 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1723 }
1724 }
1725 }
1726 }
1727 }
1728
1729
1730 // Finish Graph construction.
1731 bool ConnectionGraph::complete_connection_graph(
1732 GrowableArray<PointsToNode*>& ptnodes_worklist,
1733 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1734 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1735 GrowableArray<FieldNode*>& oop_fields_worklist) {
1736 // Normally only 1-3 passes are needed to build the Connection Graph, depending
1737 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
1738 // Set the limit to 20 to catch the situation when something went wrong and
1739 // bail out of Escape Analysis.
1740 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
1741 #define GRAPH_BUILD_ITER_LIMIT 20
1742
1743 // Propagate GlobalEscape and ArgEscape escape states and check that
1744 // we still have non-escaping objects. The method pushes onto _worklist
1745 // Field nodes which reference phantom_object.
1746 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1747 return false; // Nothing to do.
1748 }
1749 // Now propagate references to all JavaObject nodes.
1750 int java_objects_length = java_objects_worklist.length();
1751 elapsedTimer build_time;
1752 build_time.start();
1753 elapsedTimer time;
1754 bool timeout = false;
1755 int new_edges = 1;
1756 int iterations = 0;
1757 do {
1758 while ((new_edges > 0) &&
1759 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
1760 double start_time = time.seconds();
1761 time.start();
1762 new_edges = 0;
1763 // Propagate references to phantom_object for nodes pushed on _worklist
1764 // by find_non_escaped_objects() and find_field_value().
1765 new_edges += add_java_object_edges(phantom_obj, false);
1766 for (int next = 0; next < java_objects_length; ++next) {
1767 JavaObjectNode* ptn = java_objects_worklist.at(next);
1768 new_edges += add_java_object_edges(ptn, true);
1769
1770 #define SAMPLE_SIZE 4
1771 if ((next % SAMPLE_SIZE) == 0) {
1772 // Every SAMPLE_SIZE iterations estimate how much time it will take
1773 // to complete graph construction.
1774 time.stop();
1775 // Poll for requests from the shutdown mechanism to quiesce the compiler
1776 // because Connection Graph construction may take a long time.
1777 CompileBroker::maybe_block();
1778 double stop_time = time.seconds();
1779 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1780 double time_until_end = time_per_iter * (double)(java_objects_length - next);
1781 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1782 timeout = true;
1783 break; // Timeout
1784 }
1785 start_time = stop_time;
1786 time.start();
1787 }
1788 #undef SAMPLE_SIZE
1789
1790 }
1791 if (timeout) break;
1792 if (new_edges > 0) {
1793 // Update escape states on each iteration if the graph was updated.
if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1795 return false; // Nothing to do.
1796 }
1797 }
1798 time.stop();
1799 if (time.seconds() >= EscapeAnalysisTimeout) {
1800 timeout = true;
1801 break;
1802 }
1803 }
1804 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
1805 time.start();
1806 // Find fields which have unknown value.
1807 int fields_length = oop_fields_worklist.length();
1808 for (int next = 0; next < fields_length; next++) {
1809 FieldNode* field = oop_fields_worklist.at(next);
1810 if (field->edge_count() == 0) {
1811 new_edges += find_field_value(field);
1812 // This code may have added new edges to phantom_object.
1813 // Another cycle is needed to propagate references to phantom_object.
1814 }
1815 }
1816 time.stop();
1817 if (time.seconds() >= EscapeAnalysisTimeout) {
1818 timeout = true;
1819 break;
1820 }
1821 } else {
1822 new_edges = 0; // Bailout
1823 }
1824 } while (new_edges > 0);
1825
1826 build_time.stop();
1827 _build_time = build_time.seconds();
1828 _build_iterations = iterations;
1829
1830 // Bailout if passed limits.
1831 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
1832 Compile* C = _compile;
1833 if (C->log() != nullptr) {
1834 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1835 C->log()->text("%s", timeout ? "time" : "iterations");
1836 C->log()->end_elem(" limit'");
1837 }
1838 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
1839 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
1840 // Possible infinite build_connection_graph loop,
1841 // bailout (no changes to ideal graph were made).
1842 return false;
1843 }
1844
1845 #undef GRAPH_BUILD_ITER_LIMIT
1846
1847 // Find fields initialized by null for non-escaping Allocations.
1848 int non_escaped_length = non_escaped_allocs_worklist.length();
1849 for (int next = 0; next < non_escaped_length; next++) {
1850 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1851 PointsToNode::EscapeState es = ptn->escape_state();
1852 assert(es <= PointsToNode::ArgEscape, "sanity");
1853 if (es == PointsToNode::NoEscape) {
1854 if (find_init_values_null(ptn, _igvn) > 0) {
1855 // Adding references to the null object does not change escape states
1856 // since it does not escape. Also no fields are added to the null object.
1857 add_java_object_edges(null_obj, false);
1858 }
1859 }
1860 Node* n = ptn->ideal_node();
1861 if (n->is_Allocate()) {
1862 // The object allocated by this Allocate node will never be
1863 // seen by another thread. Mark it so that when it is
1864 // expanded no MemBarStoreStore is added.
1865 InitializeNode* ini = n->as_Allocate()->initialization();
1866 if (ini != nullptr)
1867 ini->set_does_not_escape();
1868 }
1869 }
1870 return true; // Finished graph construction.
1871 }
1872
1873 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1874 // and check that we still have non-escaping java objects.
1875 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1876 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
1877 GrowableArray<PointsToNode*> escape_worklist;
1878 // First, put all nodes with GlobalEscape and ArgEscape states on the worklist.
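// Illustration (hypothetical Java snippet; 'g', 'm' and 'Point' are example
// names, not from this file):
//
//   static Object g;
//   void m(Point p) { g = p; p.f = new Point(); }
//
// The store to g makes p GlobalEscape; the propagation below then walks
// p's edges, so the Point stored into p.f becomes GlobalEscape as well.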
1879 int ptnodes_length = ptnodes_worklist.length(); 1880 for (int next = 0; next < ptnodes_length; ++next) { 1881 PointsToNode* ptn = ptnodes_worklist.at(next); 1882 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1883 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1884 escape_worklist.push(ptn); 1885 } 1886 } 1887 // Set escape states to referenced nodes (edges list). 1888 while (escape_worklist.length() > 0) { 1889 PointsToNode* ptn = escape_worklist.pop(); 1890 PointsToNode::EscapeState es = ptn->escape_state(); 1891 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1892 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1893 es >= PointsToNode::ArgEscape) { 1894 // GlobalEscape or ArgEscape state of field means it has unknown value. 1895 if (add_edge(ptn, phantom_obj)) { 1896 // New edge was added 1897 add_field_uses_to_worklist(ptn->as_Field()); 1898 } 1899 } 1900 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1901 PointsToNode* e = i.get(); 1902 if (e->is_Arraycopy()) { 1903 assert(ptn->arraycopy_dst(), "sanity"); 1904 // Propagate only fields escape state through arraycopy edge. 1905 if (e->fields_escape_state() < field_es) { 1906 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1907 escape_worklist.push(e); 1908 } 1909 } else if (es >= field_es) { 1910 // fields_escape_state is also set to 'es' if it is less than 'es'. 1911 if (e->escape_state() < es) { 1912 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1913 escape_worklist.push(e); 1914 } 1915 } else { 1916 // Propagate field escape state. 1917 bool es_changed = false; 1918 if (e->fields_escape_state() < field_es) { 1919 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1920 es_changed = true; 1921 } 1922 if ((e->escape_state() < field_es) && 1923 e->is_Field() && ptn->is_JavaObject() && 1924 e->as_Field()->is_oop()) { 1925 // Change escape state of referenced fields. 1926 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1927 es_changed = true; 1928 } else if (e->escape_state() < es) { 1929 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1930 es_changed = true; 1931 } 1932 if (es_changed) { 1933 escape_worklist.push(e); 1934 } 1935 } 1936 } 1937 } 1938 // Remove escaped objects from non_escaped list. 1939 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 1940 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 1941 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1942 non_escaped_allocs_worklist.delete_at(next); 1943 } 1944 if (ptn->escape_state() == PointsToNode::NoEscape) { 1945 // Find fields in non-escaped allocations which have unknown value. 1946 find_init_values_phantom(ptn); 1947 } 1948 } 1949 return (non_escaped_allocs_worklist.length() > 0); 1950 } 1951 1952 // Add all references to JavaObject node by walking over all uses. 1953 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1954 int new_edges = 0; 1955 if (populate_worklist) { 1956 // Populate _worklist by uses of jobj's uses. 1957 for (UseIterator i(jobj); i.has_next(); i.next()) { 1958 PointsToNode* use = i.get(); 1959 if (use->is_Arraycopy()) { 1960 continue; 1961 } 1962 add_uses_to_worklist(use); 1963 if (use->is_Field() && use->as_Field()->is_oop()) { 1964 // Put on worklist all field's uses (loads) and 1965 // related field nodes (same base and offset). 
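// Illustration (hypothetical Java snippet; 'a', 'b' and 'f' are example
// names, not from this file): after a store 'a.f = x', a load
//
//   Object y = b.f;
//
// must be able to observe x whenever 'a' and 'b' may refer to the same
// object, which is why all Field nodes with the same base and offset are
// revisited here.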
add_field_uses_to_worklist(use->as_Field());
1967 }
1968 }
1969 }
1970 for (int l = 0; l < _worklist.length(); l++) {
1971 PointsToNode* use = _worklist.at(l);
1972 if (PointsToNode::is_base_use(use)) {
1973 // Add reference from jobj to field and from field to jobj (field's base).
1974 use = PointsToNode::get_use_node(use)->as_Field();
1975 if (add_base(use->as_Field(), jobj)) {
1976 new_edges++;
1977 }
1978 continue;
1979 }
1980 assert(!use->is_JavaObject(), "sanity");
1981 if (use->is_Arraycopy()) {
1982 if (jobj == null_obj) { // null object does not have field edges
1983 continue;
1984 }
1985 // Add edge from Arraycopy node to arraycopy's source java object
1986 if (add_edge(use, jobj)) {
1987 jobj->set_arraycopy_src();
1988 new_edges++;
1989 }
1990 // and stop here.
1991 continue;
1992 }
1993 if (!add_edge(use, jobj)) {
1994 continue; // No new edge added, there was such edge already.
1995 }
1996 new_edges++;
1997 if (use->is_LocalVar()) {
1998 add_uses_to_worklist(use);
1999 if (use->arraycopy_dst()) {
2000 for (EdgeIterator i(use); i.has_next(); i.next()) {
2001 PointsToNode* e = i.get();
2002 if (e->is_Arraycopy()) {
2003 if (jobj == null_obj) { // null object does not have field edges
2004 continue;
2005 }
2006 // Add edge from arraycopy's destination java object to Arraycopy node.
2007 if (add_edge(jobj, e)) {
2008 new_edges++;
2009 jobj->set_arraycopy_dst();
2010 }
2011 }
2012 }
2013 }
2014 } else {
2015 // Added a new edge to values stored in the field.
2016 // Put on worklist all field's uses (loads) and
2017 // related field nodes (same base and offset).
2018 add_field_uses_to_worklist(use->as_Field());
2019 }
2020 }
2021 _worklist.clear();
2022 _in_worklist.reset();
2023 return new_edges;
2024 }
2025
2026 // Put on worklist all related field nodes.
2027 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2028 assert(field->is_oop(), "sanity");
2029 int offset = field->offset();
2030 add_uses_to_worklist(field);
2031 // Loop over all bases of this field and push on worklist Field nodes
2032 // with the same offset and base (since they may reference the same field).
2033 for (BaseIterator i(field); i.has_next(); i.next()) {
2034 PointsToNode* base = i.get();
2035 add_fields_to_worklist(field, base);
2036 // Check if the base was the source object of an arraycopy and go over the
2037 // arraycopy's destination objects since values stored to a field of the source
2038 // object are accessible by uses (loads) of fields of the destination objects.
2039 if (base->arraycopy_src()) {
2040 for (UseIterator j(base); j.has_next(); j.next()) {
2041 PointsToNode* arycp = j.get();
2042 if (arycp->is_Arraycopy()) {
2043 for (UseIterator k(arycp); k.has_next(); k.next()) {
2044 PointsToNode* abase = k.get();
2045 if (abase->arraycopy_dst() && abase != base) {
2046 // Look for the same arraycopy reference.
2047 add_fields_to_worklist(field, abase);
2048 }
2049 }
2050 }
2051 }
2052 }
2053 }
2054 }
2055
2056 // Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2058 int offset = field->offset();
2059 if (base->is_LocalVar()) {
2060 for (UseIterator j(base); j.has_next(); j.next()) {
2061 PointsToNode* f = j.get();
2062 if (PointsToNode::is_base_use(f)) { // Field
2063 f = PointsToNode::get_use_node(f);
2064 if (f == field || !f->as_Field()->is_oop()) {
2065 continue;
2066 }
2067 int offs = f->as_Field()->offset();
2068 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2069 add_to_worklist(f);
2070 }
2071 }
2072 }
2073 } else {
2074 assert(base->is_JavaObject(), "sanity");
2075 if (// Skip phantom_object since it is only used to indicate that
2076 // this field's content globally escapes.
2077 (base != phantom_obj) &&
2078 // null object node does not have fields.
2079 (base != null_obj)) {
2080 for (EdgeIterator i(base); i.has_next(); i.next()) {
2081 PointsToNode* f = i.get();
2082 // Skip arraycopy edge since a store to the destination object field
2083 // does not update the value in the source object field.
2084 if (f->is_Arraycopy()) {
2085 assert(base->arraycopy_dst(), "sanity");
2086 continue;
2087 }
2088 if (f == field || !f->as_Field()->is_oop()) {
2089 continue;
2090 }
2091 int offs = f->as_Field()->offset();
2092 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2093 add_to_worklist(f);
2094 }
2095 }
2096 }
2097 }
2098 }
2099
2100 // Find fields which have unknown value.
2101 int ConnectionGraph::find_field_value(FieldNode* field) {
2102 // Escaped fields should already have an init value.
2103 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2104 int new_edges = 0;
2105 for (BaseIterator i(field); i.has_next(); i.next()) {
2106 PointsToNode* base = i.get();
2107 if (base->is_JavaObject()) {
2108 // Skip Allocate's fields which will be processed later.
2109 if (base->ideal_node()->is_Allocate()) {
2110 return 0;
2111 }
2112 assert(base == null_obj, "only null ptr base expected here");
2113 }
2114 }
2115 if (add_edge(field, phantom_obj)) {
2116 // New edge was added
2117 new_edges++;
2118 add_field_uses_to_worklist(field);
2119 }
2120 return new_edges;
2121 }
2122
2123 // Find fields initializing values for allocations.
2124 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2125 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2126 Node* alloc = pta->ideal_node();
2127
2128 // Do nothing for Allocate nodes since their field values are
2129 // "known" unless they are initialized by arraycopy/clone.
2130 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2131 return 0;
2132 }
2133 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2134 #ifdef ASSERT
2135 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2136 const char* name = alloc->as_CallStaticJava()->_name;
2137 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
2138 }
2139 #endif
2140 // Non-escaped allocations returned from a Java or runtime call have unknown values in their fields.
2141 int new_edges = 0;
2142 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2143 PointsToNode* field = i.get();
2144 if (field->is_Field() && field->as_Field()->is_oop()) {
2145 if (add_edge(field, phantom_obj)) {
2146 // New edge was added
2147 new_edges++;
2148 add_field_uses_to_worklist(field->as_Field());
2149 }
2150 }
2151 }
2152 return new_edges;
2153 }
2154
2155 // Find fields initializing values for allocations.
int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2157 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2158 Node* alloc = pta->ideal_node();
2159 // Do nothing for Call nodes since their field values are unknown.
2160 if (!alloc->is_Allocate()) {
2161 return 0;
2162 }
2163 InitializeNode* ini = alloc->as_Allocate()->initialization();
2164 bool visited_bottom_offset = false;
2165 GrowableArray<int> offsets_worklist;
2166 int new_edges = 0;
2167
2168 // Check if an oop field's initializing value is recorded and add
2169 // a corresponding null value if it is not recorded.
2170 // The Connection Graph does not record a default initialization by null
2171 // captured by an Initialize node.
2172 //
2173 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2174 PointsToNode* field = i.get(); // Field (AddP)
2175 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2176 continue; // Not oop field
2177 }
2178 int offset = field->as_Field()->offset();
2179 if (offset == Type::OffsetBot) {
2180 if (!visited_bottom_offset) {
2181 // OffsetBot is used to reference an array's element;
2182 // always add a reference to null to all Field nodes since we don't
2183 // know which element is referenced.
2184 if (add_edge(field, null_obj)) {
2185 // New edge was added
2186 new_edges++;
2187 add_field_uses_to_worklist(field->as_Field());
2188 visited_bottom_offset = true;
2189 }
2190 }
2191 } else {
2192 // Check only oop fields.
2193 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2194 if (adr_type->isa_rawptr()) {
2195 #ifdef ASSERT
2196 // Raw pointers are used for initializing stores, so skip this field
2197 // since it should be recorded already
2198 Node* base = get_addp_base(field->ideal_node());
2199 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2200 #endif
2201 continue;
2202 }
2203 if (!offsets_worklist.contains(offset)) {
2204 offsets_worklist.append(offset);
2205 Node* value = nullptr;
2206 if (ini != nullptr) {
2207 // StoreP::memory_type() == T_ADDRESS
2208 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2209 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2210 // Make sure the initializing store has the same type as this AddP.
2211 // This AddP may reference a non-existing field because it is on a
2212 // dead branch of a bimorphic call which has not been eliminated yet.
2213 if (store != nullptr && store->is_Store() &&
2214 store->as_Store()->memory_type() == ft) {
2215 value = store->in(MemNode::ValueIn);
2216 #ifdef ASSERT
2217 if (VerifyConnectionGraph) {
2218 // Verify that the AddP already points to all objects the value points to.
PointsToNode* val = ptnode_adr(value->_idx);
2220 assert((val != nullptr), "should be processed already");
2221 PointsToNode* missed_obj = nullptr;
2222 if (val->is_JavaObject()) {
2223 if (!field->points_to(val->as_JavaObject())) {
2224 missed_obj = val;
2225 }
2226 } else {
2227 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2228 tty->print_cr("----------init store has invalid value -----");
2229 store->dump();
2230 val->dump();
2231 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2232 }
2233 for (EdgeIterator j(val); j.has_next(); j.next()) {
2234 PointsToNode* obj = j.get();
2235 if (obj->is_JavaObject()) {
2236 if (!field->points_to(obj->as_JavaObject())) {
2237 missed_obj = obj;
2238 break;
2239 }
2240 }
2241 }
2242 }
2243 if (missed_obj != nullptr) {
2244 tty->print_cr("----------field---------------------------------");
2245 field->dump();
2246 tty->print_cr("----------missed reference to object-----------");
2247 missed_obj->dump();
2248 tty->print_cr("----------object referenced by init store -----");
2249 store->dump();
2250 val->dump();
2251 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2252 }
2253 }
2254 #endif
2255 } else {
2256 // There could be initializing stores which follow the allocation.
2257 // For example, a volatile field store is not collected
2258 // by the Initialize node.
2259 //
2260 // Need to check for dependent loads to separate such stores from
2261 // stores which follow loads. For now, add the initial value null so
2262 // that the compare pointers optimization works correctly.
2263 }
2264 }
2265 if (value == nullptr) {
2266 // A field's initializing value was not recorded. Add null.
2267 if (add_edge(field, null_obj)) {
2268 // New edge was added
2269 new_edges++;
2270 add_field_uses_to_worklist(field->as_Field());
2271 }
2272 }
2273 }
2274 }
2275 }
2276 return new_edges;
2277 }
2278
2279 // Adjust scalar_replaceable state after Connection Graph is built.
2280 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2281 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2282 // returns true. If one of the constraints in this method sets 'jobj' to NSR
2283 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2284 // input, 'adjust_scalar_replaceable_state' will eventually be called with
2285 // that other object and the Phi will become a reducible Phi.
2286 // There could be multiple merges involving the same jobj.
2287 Unique_Node_List candidates;
2288
2289 // Search for non-escaping objects which are not scalar replaceable
2290 // and mark them to propagate the state to referenced objects.
2291
2292 for (UseIterator i(jobj); i.has_next(); i.next()) {
2293 PointsToNode* use = i.get();
2294 if (use->is_Arraycopy()) {
2295 continue;
2296 }
2297 if (use->is_Field()) {
2298 FieldNode* field = use->as_Field();
2299 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2300 // 1. An object is not scalar replaceable if the field into which it is
2301 // stored has unknown offset (stored into unknown element of an array).
2302 if (field->offset() == Type::OffsetBot) {
2303 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2304 return;
2305 }
2306 for (BaseIterator i(field); i.has_next(); i.next()) {
2307 PointsToNode* base = i.get();
2308 // 2. An object is not scalar replaceable if the field into which it is
2309 // stored has multiple bases, one of which is null.
2310 if ((base == null_obj) && (field->base_count() > 1)) {
2311 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2312 return;
2313 }
2314 // 2.5. An object is not scalar replaceable if the field into which it is
2315 // stored has an NSR base.
2316 if (!base->scalar_replaceable()) {
2317 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2318 return;
2319 }
2320 }
2321 }
2322 assert(use->is_Field() || use->is_LocalVar(), "sanity");
2323 // 3. An object is not scalar replaceable if it is merged with other objects
2324 // and we can't remove the merge.
2325 for (EdgeIterator j(use); j.has_next(); j.next()) {
2326 PointsToNode* ptn = j.get();
2327 if (ptn->is_JavaObject() && ptn != jobj) {
2328 Node* use_n = use->ideal_node();
2329
2330 // If it's already a candidate or a confirmed reducible merge we can skip verification
2331 if (candidates.member(use_n)) {
2332 continue;
2333 } else if (reducible_merges.member(use_n)) {
2334 candidates.push(use_n);
2335 continue;
2336 }
2337
2338 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2339 candidates.push(use_n);
2340 } else {
2341 // Mark all objects as NSR if we can't remove the merge
2342 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2343 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2344 }
2345 }
2346 }
2347 if (!jobj->scalar_replaceable()) {
2348 return;
2349 }
2350 }
2351
2352 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2353 if (j.get()->is_Arraycopy()) {
2354 continue;
2355 }
2356
2357 // A non-escaping object node should point only to field nodes.
2358 FieldNode* field = j.get()->as_Field();
2359 int offset = field->as_Field()->offset();
2360
2361 // 4. An object is not scalar replaceable if it has a field with unknown
2362 // offset (an array's element is accessed in a loop).
2363 if (offset == Type::OffsetBot) {
2364 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2365 return;
2366 }
2367 // 5. Currently an object is not scalar replaceable if a LoadStore node
2368 // accesses its field since the field value is unknown after it.
2369 //
2370 Node* n = field->ideal_node();
2371
2372 // Test for an unsafe access that was parsed as maybe off heap
2373 // (with a CheckCastPP to raw memory).
2374 assert(n->is_AddP(), "expect an address computation");
2375 if (n->in(AddPNode::Base)->is_top() &&
2376 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2377 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2378 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2379 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2380 return;
2381 }
2382
2383 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2384 Node* u = n->fast_out(i);
2385 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2386 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2387 return;
2388 }
2389 }
2390
2391 // 6. Or the address may point to more than one object. This may produce
2392 // a false-positive result (set not scalar replaceable)
2393 // since the flow-insensitive escape analysis can't separate
2394 // the case when stores overwrite the field's value from the case
2395 // when stores happened on different control branches.
2396 //
2397 // Note: it will disable scalar replacement in some cases:
2398 //
2399 // Point p[] = new Point[1];
2400 // p[0] = new Point(); // Will not be scalar replaced
2401 //
2402 // but it will save us from incorrect optimizations in cases like the following:
2403 //
2404 // Point p[] = new Point[1];
2405 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
2406 //
2407 if (field->base_count() > 1 && candidates.size() == 0) {
2408 for (BaseIterator i(field); i.has_next(); i.next()) {
2409 PointsToNode* base = i.get();
2410 // Don't take into account LocalVar nodes which
2411 // may point to only one object which should also be
2412 // this field's base by now.
2413 if (base->is_JavaObject() && base != jobj) {
2414 // Mark all bases.
2415 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
2416 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
2417 }
2418 }
2419
2420 if (!jobj->scalar_replaceable()) {
2421 return;
2422 }
2423 }
2424 }
2425
2426 // The candidate is truly a reducible merge only if none of the other
2427 // constraints ruled it as NSR. There could be multiple merges involving the
2428 // same jobj.
2429 assert(jobj->scalar_replaceable(), "sanity");
2430 for (uint i = 0; i < candidates.size(); i++ ) {
2431 Node* candidate = candidates.at(i);
2432 reducible_merges.push(candidate);
2433 }
2434 }
2435
2436 // Propagate NSR (Not Scalar Replaceable) state.
2437 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
2438 int jobj_length = jobj_worklist.length();
2439 bool found_nsr_alloc = true;
2440 while (found_nsr_alloc) {
2441 found_nsr_alloc = false;
2442 for (int next = 0; next < jobj_length; ++next) {
2443 JavaObjectNode* jobj = jobj_worklist.at(next);
2444 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
2445 PointsToNode* use = i.get();
2446 if (use->is_Field()) {
2447 FieldNode* field = use->as_Field();
2448 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2449 assert(field->offset() != Type::OffsetBot, "sanity");
2450 for (BaseIterator i(field); i.has_next(); i.next()) {
2451 PointsToNode* base = i.get();
2452 // An object is not scalar replaceable if the field into which
2453 // it is stored has an NSR base.
2454 if ((base != null_obj) && !base->scalar_replaceable()) {
2455 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2456 found_nsr_alloc = true;
2457 break;
2458 }
2459 }
2460 }
2461 }
2462 }
2463 }
2464 }
2465
2466 #ifdef ASSERT
2467 void ConnectionGraph::verify_connection_graph(
2468 GrowableArray<PointsToNode*>& ptnodes_worklist,
2469 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2470 GrowableArray<JavaObjectNode*>& java_objects_worklist,
2471 GrowableArray<Node*>& addp_worklist) {
2472 // Verify that the graph is complete - no new edges could be added.
2473 int java_objects_length = java_objects_worklist.length(); 2474 int non_escaped_length = non_escaped_allocs_worklist.length(); 2475 int new_edges = 0; 2476 for (int next = 0; next < java_objects_length; ++next) { 2477 JavaObjectNode* ptn = java_objects_worklist.at(next); 2478 new_edges += add_java_object_edges(ptn, true); 2479 } 2480 assert(new_edges == 0, "graph was not complete"); 2481 // Verify that escape state is final. 2482 int length = non_escaped_allocs_worklist.length(); 2483 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 2484 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 2485 (non_escaped_length == length) && 2486 (_worklist.length() == 0), "escape state was not final"); 2487 2488 // Verify fields information. 2489 int addp_length = addp_worklist.length(); 2490 for (int next = 0; next < addp_length; ++next ) { 2491 Node* n = addp_worklist.at(next); 2492 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 2493 if (field->is_oop()) { 2494 // Verify that field has all bases 2495 Node* base = get_addp_base(n); 2496 PointsToNode* ptn = ptnode_adr(base->_idx); 2497 if (ptn->is_JavaObject()) { 2498 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 2499 } else { 2500 assert(ptn->is_LocalVar(), "sanity"); 2501 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2502 PointsToNode* e = i.get(); 2503 if (e->is_JavaObject()) { 2504 assert(field->has_base(e->as_JavaObject()), "sanity"); 2505 } 2506 } 2507 } 2508 // Verify that all fields have initializing values. 2509 if (field->edge_count() == 0) { 2510 tty->print_cr("----------field does not have references----------"); 2511 field->dump(); 2512 for (BaseIterator i(field); i.has_next(); i.next()) { 2513 PointsToNode* base = i.get(); 2514 tty->print_cr("----------field has next base---------------------"); 2515 base->dump(); 2516 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 2517 tty->print_cr("----------base has fields-------------------------"); 2518 for (EdgeIterator j(base); j.has_next(); j.next()) { 2519 j.get()->dump(); 2520 } 2521 tty->print_cr("----------base has references---------------------"); 2522 for (UseIterator j(base); j.has_next(); j.next()) { 2523 j.get()->dump(); 2524 } 2525 } 2526 } 2527 for (UseIterator i(field); i.has_next(); i.next()) { 2528 i.get()->dump(); 2529 } 2530 assert(field->edge_count() > 0, "sanity"); 2531 } 2532 } 2533 } 2534 } 2535 #endif 2536 2537 // Optimize ideal graph. 2538 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 2539 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 2540 Compile* C = _compile; 2541 PhaseIterGVN* igvn = _igvn; 2542 if (EliminateLocks) { 2543 // Mark locks before changing ideal graph. 2544 int cnt = C->macro_count(); 2545 for (int i = 0; i < cnt; i++) { 2546 Node *n = C->macro_node(i); 2547 if (n->is_AbstractLock()) { // Lock and Unlock nodes 2548 AbstractLockNode* alock = n->as_AbstractLock(); 2549 if (!alock->is_non_esc_obj()) { 2550 if (not_global_escape(alock->obj_node())) { 2551 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 2552 // The lock could be marked eliminated by lock coarsening 2553 // code during first IGVN before EA. Replace coarsened flag 2554 // to eliminate all associated locks/unlocks. 
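// Illustration (hypothetical Java snippet; 'counter' is an example field,
// not from this file):
//
//   synchronized (new Object()) { counter++; }
//
// The monitor object never escapes the method, so not_global_escape()
// holds, the lock is marked non_esc_obj below, and the Lock/Unlock pair
// can later be eliminated during macro expansion.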
2555 #ifdef ASSERT 2556 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 2557 #endif 2558 alock->set_non_esc_obj(); 2559 } 2560 } 2561 } 2562 } 2563 } 2564 2565 if (OptimizePtrCompare) { 2566 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 2567 Node *n = ptr_cmp_worklist.at(i); 2568 const TypeInt* tcmp = optimize_ptr_compare(n); 2569 if (tcmp->singleton()) { 2570 Node* cmp = igvn->makecon(tcmp); 2571 #ifndef PRODUCT 2572 if (PrintOptimizePtrCompare) { 2573 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 2574 if (Verbose) { 2575 n->dump(1); 2576 } 2577 } 2578 #endif 2579 igvn->replace_node(n, cmp); 2580 } 2581 } 2582 } 2583 2584 // For MemBarStoreStore nodes added in library_call.cpp, check 2585 // escape status of associated AllocateNode and optimize out 2586 // MemBarStoreStore node if the allocated object never escapes. 2587 for (int i = 0; i < storestore_worklist.length(); i++) { 2588 Node* storestore = storestore_worklist.at(i); 2589 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 2590 if (alloc->is_Allocate() && not_global_escape(alloc)) { 2591 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 2592 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 2593 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 2594 igvn->register_new_node_with_optimizer(mb); 2595 igvn->replace_node(storestore, mb); 2596 } 2597 } 2598 } 2599 2600 // Optimize objects compare. 2601 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { 2602 assert(OptimizePtrCompare, "sanity"); 2603 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 2604 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO 2605 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE 2606 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1] 2607 2608 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2609 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2610 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2611 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2612 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2613 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2614 2615 // Check simple cases first. 2616 if (jobj1 != nullptr) { 2617 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2618 if (jobj1 == jobj2) { 2619 // Comparing the same not escaping object. 2620 return EQ; 2621 } 2622 Node* obj = jobj1->ideal_node(); 2623 // Comparing not escaping allocation. 2624 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2625 !ptn2->points_to(jobj1)) { 2626 return NE; // This includes nullness check. 2627 } 2628 } 2629 } 2630 if (jobj2 != nullptr) { 2631 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2632 Node* obj = jobj2->ideal_node(); 2633 // Comparing not escaping allocation. 2634 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2635 !ptn1->points_to(jobj2)) { 2636 return NE; // This includes nullness check. 2637 } 2638 } 2639 } 2640 if (jobj1 != nullptr && jobj1 != phantom_obj && 2641 jobj2 != nullptr && jobj2 != phantom_obj && 2642 jobj1->ideal_node()->is_Con() && 2643 jobj2->ideal_node()->is_Con()) { 2644 // Klass or String constants compare. Need to be careful with 2645 // compressed pointers - compare types of ConN and ConP instead of nodes. 
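// Illustration (hypothetical Java snippet; assumes both inputs fold to
// constant mirrors after inlining):
//
//   Class<?> c1 = String.class, c2 = Integer.class;
//   boolean same = (c1 == c2);
//
// With compressed oops one input may be a narrow ConN while the other is a
// ConP, so the code below compares the constants' pointer types rather than
// the ideal nodes themselves.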
2646 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2647 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2648 if (t1->make_ptr() == t2->make_ptr()) { 2649 return EQ; 2650 } else { 2651 return NE; 2652 } 2653 } 2654 if (ptn1->meet(ptn2)) { 2655 return UNKNOWN; // Sets are not disjoint 2656 } 2657 2658 // Sets are disjoint. 2659 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2660 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2661 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2662 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2663 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2664 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2665 // Check nullness of unknown object. 2666 return UNKNOWN; 2667 } 2668 2669 // Disjointness by itself is not sufficient since 2670 // alias analysis is not complete for escaped objects. 2671 // Disjoint sets are definitely unrelated only when 2672 // at least one set has only not escaping allocations. 2673 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2674 if (ptn1->non_escaping_allocation()) { 2675 return NE; 2676 } 2677 } 2678 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2679 if (ptn2->non_escaping_allocation()) { 2680 return NE; 2681 } 2682 } 2683 return UNKNOWN; 2684 } 2685 2686 // Connection Graph construction functions. 2687 2688 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2689 PointsToNode* ptadr = _nodes.at(n->_idx); 2690 if (ptadr != nullptr) { 2691 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2692 return; 2693 } 2694 Compile* C = _compile; 2695 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2696 map_ideal_node(n, ptadr); 2697 } 2698 2699 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2700 PointsToNode* ptadr = _nodes.at(n->_idx); 2701 if (ptadr != nullptr) { 2702 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2703 return ptadr; 2704 } 2705 Compile* C = _compile; 2706 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2707 map_ideal_node(n, ptadr); 2708 return ptadr; 2709 } 2710 2711 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2712 PointsToNode* ptadr = _nodes.at(n->_idx); 2713 if (ptadr != nullptr) { 2714 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2715 return; 2716 } 2717 bool unsafe = false; 2718 bool is_oop = is_oop_field(n, offset, &unsafe); 2719 if (unsafe) { 2720 es = PointsToNode::GlobalEscape; 2721 } 2722 Compile* C = _compile; 2723 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2724 map_ideal_node(n, field); 2725 } 2726 2727 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2728 PointsToNode* src, PointsToNode* dst) { 2729 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2730 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 2731 PointsToNode* ptadr = _nodes.at(n->_idx); 2732 if (ptadr != nullptr) { 2733 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2734 return; 2735 } 2736 Compile* C = _compile; 2737 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2738 map_ideal_node(n, ptadr); 2739 // Add edge from arraycopy node to source object. 2740 (void)add_edge(ptadr, src); 2741 src->set_arraycopy_src(); 2742 // Add edge from destination object to arraycopy node. 
2743 (void)add_edge(dst, ptadr); 2744 dst->set_arraycopy_dst(); 2745 } 2746 2747 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2748 const Type* adr_type = n->as_AddP()->bottom_type(); 2749 BasicType bt = T_INT; 2750 if (offset == Type::OffsetBot) { 2751 // Check only oop fields. 2752 if (!adr_type->isa_aryptr() || 2753 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 2754 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 2755 // OffsetBot is used to reference array's element. Ignore first AddP. 2756 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 2757 bt = T_OBJECT; 2758 } 2759 } 2760 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2761 if (adr_type->isa_instptr()) { 2762 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2763 if (field != nullptr) { 2764 bt = field->layout_type(); 2765 } else { 2766 // Check for unsafe oop field access 2767 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2768 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2769 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2770 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2771 bt = T_OBJECT; 2772 (*unsafe) = true; 2773 } 2774 } 2775 } else if (adr_type->isa_aryptr()) { 2776 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2777 // Ignore array length load. 2778 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 2779 // Ignore first AddP. 2780 } else { 2781 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2782 bt = elemtype->array_element_basic_type(); 2783 } 2784 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2785 // Allocation initialization, ThreadLocal field access, unsafe access 2786 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2787 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2788 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2789 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2790 bt = T_OBJECT; 2791 } 2792 } 2793 } 2794 // Note: T_NARROWOOP is not classed as a real reference type 2795 return (is_reference_type(bt) || bt == T_NARROWOOP); 2796 } 2797 2798 // Returns unique pointed java object or null. 2799 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 2800 // If the node was created after the escape computation we can't answer. 2801 uint idx = n->_idx; 2802 if (idx >= nodes_size()) { 2803 return nullptr; 2804 } 2805 PointsToNode* ptn = ptnode_adr(idx); 2806 if (ptn == nullptr) { 2807 return nullptr; 2808 } 2809 if (ptn->is_JavaObject()) { 2810 return ptn->as_JavaObject(); 2811 } 2812 assert(ptn->is_LocalVar(), "sanity"); 2813 // Check all java objects it points to. 2814 JavaObjectNode* jobj = nullptr; 2815 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2816 PointsToNode* e = i.get(); 2817 if (e->is_JavaObject()) { 2818 if (jobj == nullptr) { 2819 jobj = e->as_JavaObject(); 2820 } else if (jobj != e) { 2821 return nullptr; 2822 } 2823 } 2824 } 2825 return jobj; 2826 } 2827 2828 // Return true if this node points only to non-escaping allocations. 
bool PointsToNode::non_escaping_allocation() {
2830 if (is_JavaObject()) {
2831 Node* n = ideal_node();
2832 if (n->is_Allocate() || n->is_CallStaticJava()) {
2833 return (escape_state() == PointsToNode::NoEscape);
2834 } else {
2835 return false;
2836 }
2837 }
2838 assert(is_LocalVar(), "sanity");
2839 // Check all java objects it points to.
2840 for (EdgeIterator i(this); i.has_next(); i.next()) {
2841 PointsToNode* e = i.get();
2842 if (e->is_JavaObject()) {
2843 Node* n = e->ideal_node();
2844 if ((e->escape_state() != PointsToNode::NoEscape) ||
2845 !(n->is_Allocate() || n->is_CallStaticJava())) {
2846 return false;
2847 }
2848 }
2849 }
2850 return true;
2851 }
2852
2853 // Return true if we know the node does not escape globally.
2854 bool ConnectionGraph::not_global_escape(Node *n) {
2855 assert(!_collecting, "should not call during graph construction");
2856 // If the node was created after the escape computation we can't answer.
2857 uint idx = n->_idx;
2858 if (idx >= nodes_size()) {
2859 return false;
2860 }
2861 PointsToNode* ptn = ptnode_adr(idx);
2862 if (ptn == nullptr) {
2863 return false; // not in congraph (e.g. ConI)
2864 }
2865 PointsToNode::EscapeState es = ptn->escape_state();
2866 // If we have already computed a value, return it.
2867 if (es >= PointsToNode::GlobalEscape) {
2868 return false;
2869 }
2870 if (ptn->is_JavaObject()) {
2871 return true; // (es < PointsToNode::GlobalEscape);
2872 }
2873 assert(ptn->is_LocalVar(), "sanity");
2874 // Check all java objects it points to.
2875 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2876 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2877 return false;
2878 }
2879 }
2880 return true;
2881 }
2882
2883
2884 // Helper functions
2885
2886 // Return true if this node is the specified java object or points to it.
2887 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2888 if (is_JavaObject()) {
2889 return (this == ptn);
2890 }
2891 assert(is_LocalVar() || is_Field(), "sanity");
2892 for (EdgeIterator i(this); i.has_next(); i.next()) {
2893 if (i.get() == ptn) {
2894 return true;
2895 }
2896 }
2897 return false;
2898 }
2899
2900 // Return true if one node points to another.
2901 bool PointsToNode::meet(PointsToNode* ptn) {
2902 if (this == ptn) {
2903 return true;
2904 } else if (ptn->is_JavaObject()) {
2905 return this->points_to(ptn->as_JavaObject());
2906 } else if (this->is_JavaObject()) {
2907 return ptn->points_to(this->as_JavaObject());
2908 }
2909 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2910 int ptn_count = ptn->edge_count();
2911 for (EdgeIterator i(this); i.has_next(); i.next()) {
2912 PointsToNode* this_e = i.get();
2913 for (int j = 0; j < ptn_count; j++) {
2914 if (this_e == ptn->edge(j)) {
2915 return true;
2916 }
2917 }
2918 }
2919 return false;
2920 }
2921
2922 #ifdef ASSERT
2923 // Return true if bases point to this java object.
2924 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2925 for (BaseIterator i(this); i.has_next(); i.next()) {
2926 if (i.get() == jobj) {
2927 return true;
2928 }
2929 }
2930 return false;
2931 }
2932 #endif
2933
2934 bool ConnectionGraph::is_captured_store_address(Node* addp) {
2935 // Handle simple case first.
2936 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 2937 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 2938 return true; 2939 } else if (addp->in(AddPNode::Address)->is_Phi()) { 2940 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 2941 Node* addp_use = addp->fast_out(i); 2942 if (addp_use->is_Store()) { 2943 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 2944 if (addp_use->fast_out(j)->is_Initialize()) { 2945 return true; 2946 } 2947 } 2948 } 2949 } 2950 } 2951 return false; 2952 } 2953 2954 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 2955 const Type *adr_type = phase->type(adr); 2956 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 2957 // We are computing a raw address for a store captured by an Initialize 2958 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2959 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2960 assert(offs != Type::OffsetBot || 2961 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2962 "offset must be a constant or it is initialization of array"); 2963 return offs; 2964 } 2965 const TypePtr *t_ptr = adr_type->isa_ptr(); 2966 assert(t_ptr != nullptr, "must be a pointer type"); 2967 return t_ptr->offset(); 2968 } 2969 2970 Node* ConnectionGraph::get_addp_base(Node *addp) { 2971 assert(addp->is_AddP(), "must be AddP"); 2972 // 2973 // AddP cases for Base and Address inputs: 2974 // case #1. Direct object's field reference: 2975 // Allocate 2976 // | 2977 // Proj #5 ( oop result ) 2978 // | 2979 // CheckCastPP (cast to instance type) 2980 // | | 2981 // AddP ( base == address ) 2982 // 2983 // case #2. Indirect object's field reference: 2984 // Phi 2985 // | 2986 // CastPP (cast to instance type) 2987 // | | 2988 // AddP ( base == address ) 2989 // 2990 // case #3. Raw object's field reference for Initialize node: 2991 // Allocate 2992 // | 2993 // Proj #5 ( oop result ) 2994 // top | 2995 // \ | 2996 // AddP ( base == top ) 2997 // 2998 // case #4. Array's element reference: 2999 // {CheckCastPP | CastPP} 3000 // | | | 3001 // | AddP ( array's element offset ) 3002 // | | 3003 // AddP ( array's offset ) 3004 // 3005 // case #5. Raw object's field reference for arraycopy stub call: 3006 // The inline_native_clone() case when the arraycopy stub is called 3007 // after the allocation before Initialize and CheckCastPP nodes. 3008 // Allocate 3009 // | 3010 // Proj #5 ( oop result ) 3011 // | | 3012 // AddP ( base == address ) 3013 // 3014 // case #6. Constant Pool, ThreadLocal, CastX2P or 3015 // Raw object's field reference: 3016 // {ConP, ThreadLocal, CastX2P, raw Load} 3017 // top | 3018 // \ | 3019 // AddP ( base == top ) 3020 // 3021 // case #7. Klass's field reference. 3022 // LoadKlass 3023 // | | 3024 // AddP ( base == address ) 3025 // 3026 // case #8. narrow Klass's field reference. 3027 // LoadNKlass 3028 // | 3029 // DecodeN 3030 // | | 3031 // AddP ( base == address ) 3032 // 3033 // case #9. Mixed unsafe access 3034 // {instance} 3035 // | 3036 // CheckCastPP (raw) 3037 // top | 3038 // \ | 3039 // AddP ( base == top ) 3040 // 3041 Node *base = addp->in(AddPNode::Base); 3042 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 3043 base = addp->in(AddPNode::Address); 3044 while (base->is_AddP()) { 3045 // Case #6 (unsafe access) may have several chained AddP nodes. 
assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3047 base = base->in(AddPNode::Address);
3048 }
3049 if (base->Opcode() == Op_CheckCastPP &&
3050 base->bottom_type()->isa_rawptr() &&
3051 _igvn->type(base->in(1))->isa_oopptr()) {
3052 base = base->in(1); // Case #9
3053 } else {
3054 Node* uncast_base = base->uncast();
3055 int opcode = uncast_base->Opcode();
3056 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3057 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3058 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3059 is_captured_store_address(addp), "sanity");
3060 }
3061 }
3062 return base;
3063 }
3064
3065 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3066 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3067 Node* addp2 = addp->raw_out(0);
3068 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3069 addp2->in(AddPNode::Base) == n &&
3070 addp2->in(AddPNode::Address) == addp) {
3071 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3072 //
3073 // Find the array's offset to push it on the worklist first and
3074 // as a result process the array's element offset first (pushed second)
3075 // to avoid a CastPP for the array's offset.
3076 // Otherwise the inserted CastPP (LocalVar) will point to what
3077 // the AddP (Field) points to, which would be wrong since
3078 // the algorithm expects the CastPP to have the same points-to set
3079 // as AddP's base CheckCastPP (LocalVar).
3080 //
3081 // ArrayAllocation
3082 // |
3083 // CheckCastPP
3084 // |
3085 // memProj (from ArrayAllocation CheckCastPP)
3086 // |  ||
3087 // |  ||  Int (element index)
3088 // |  ||  |  ConI (log(element size))
3089 // |  ||  |  /
3090 // |  ||  LShift
3091 // |  ||  /
3092 // |  AddP (array's element offset)
3093 // |  |
3094 // |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
3095 // | / /
3096 // AddP (array's offset)
3097 // |
3098 // Load/Store (memory operation on array's element)
3099 //
3100 return addp2;
3101 }
3102 return nullptr;
3103 }
3104
3105 //
3106 // Adjust the type and inputs of an AddP which computes the
3107 // address of a field of an instance
3108 //
3109 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3110 PhaseGVN* igvn = _igvn;
3111 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3112 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3113 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3114 if (t == nullptr) {
3115 // We are computing a raw address for a store captured by an Initialize;
3116 // compute an appropriate address type (cases #3 and #5).
3117 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3118 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3119 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3120 assert(offs != Type::OffsetBot, "offset must be a constant");
3121 t = base_t->add_offset(offs)->is_oopptr();
3122 }
3123 int inst_id = base_t->instance_id();
3124 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3125 "old type must be non-instance or match new type");
3126
3127 // The type 't' could be a subclass of 'base_t'.
3128 // As a result t->offset() could be larger than base_t's size, and that will
3129 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3130 // constructor verifies correctness of the offset.
3131   //
3132   // It can happen on a subclass's branch (from the type profiling
3133   // inlining) which was not eliminated during parsing since the exactness
3134   // of the allocation type was not propagated to the subclass type check.
3135   //
3136   // Or the type 't' might not be related to 'base_t' at all.
3137   // It can happen when the CHA type differs from the MDO type on a dead path
3138   // (for example, from an instanceof check) which is not collapsed during parsing.
3139   //
3140   // Do nothing for such AddP node and don't process its users since
3141   // this code branch will go away.
3142   //
3143   if (!t->is_known_instance() &&
3144       !base_t->maybe_java_subtype_of(t)) {
3145     return false; // bail out
3146   }
3147   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3148   // Do NOT remove the next line: ensure a new alias index is allocated
3149   // for the instance type. Note: C++ will not remove it since the call
3150   // has a side effect.
3151   int alias_idx = _compile->get_alias_index(tinst);
3152   igvn->set_type(addp, tinst);
3153   // record the allocation in the node map
3154   set_map(addp, get_map(base->_idx));
3155   // Set addp's Base and Address to 'base'.
3156   Node *abase = addp->in(AddPNode::Base);
3157   Node *adr = addp->in(AddPNode::Address);
3158   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3159       adr->in(0)->_idx == (uint)inst_id) {
3160     // Skip AddP cases #3 and #5.
3161   } else {
3162     assert(!abase->is_top(), "sanity"); // AddP case #3
3163     if (abase != base) {
3164       igvn->hash_delete(addp);
3165       addp->set_req(AddPNode::Base, base);
3166       if (abase == adr) {
3167         addp->set_req(AddPNode::Address, base);
3168       } else {
3169         // AddP case #4 (adr is array's element offset AddP node)
3170 #ifdef ASSERT
3171         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3172         assert(adr->is_AddP() && atype != nullptr &&
3173                atype->instance_id() == inst_id, "array's element offset should be processed first");
3174 #endif
3175       }
3176       igvn->hash_insert(addp);
3177     }
3178   }
3179   // Put on IGVN worklist since at least addp's type was changed above.
3180   record_for_optimizer(addp);
3181   return true;
3182 }
3183 
3184 //
3185 // Create a new version of orig_phi if necessary. Returns either the newly
3186 // created phi or an existing phi. Sets create_new to indicate whether a new
3187 // phi was created. Cache the last newly created phi in the node map.
3188 //
3189 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
3190   Compile *C = _compile;
3191   PhaseGVN* igvn = _igvn;
3192   new_created = false;
3193   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3194   // nothing to do if orig_phi is bottom memory or matches alias_idx
3195   if (phi_alias_idx == alias_idx) {
3196     return orig_phi;
3197   }
3198   // Have we recently created a Phi for this alias index?
3199   PhiNode *result = get_map_phi(orig_phi->_idx);
3200   if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3201     return result;
3202   }
3203   // Previous check may fail when the same wide memory Phi was split into Phis
3204   // for different memory slices. Search all Phis for this region.
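  // For example (shapes assumed for illustration): if a bottom-memory Phi #80
  // was split into Phi #120 (alias_index=5) and Phi #130 (alias_index=6), the
  // node map remembers only the split created last, so a request for the other
  // slice must scan all Phis hanging off the same Region.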
3205   if (result != nullptr) {
3206     Node* region = orig_phi->in(0);
3207     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3208       Node* phi = region->fast_out(i);
3209       if (phi->is_Phi() &&
3210           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3211         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3212         return phi->as_Phi();
3213       }
3214     }
3215   }
3216   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3217     if (C->do_escape_analysis() == true && !C->failing()) {
3218       // Retry compilation without escape analysis.
3219       // If this is the first failure, the sentinel string will "stick"
3220       // to the Compile object, and the C2Compiler will see it and retry.
3221       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3222     }
3223     return nullptr;
3224   }
3225   orig_phi_worklist.append_if_missing(orig_phi);
3226   const TypePtr *atype = C->get_adr_type(alias_idx);
3227   result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3228   C->copy_node_notes_to(result, orig_phi);
3229   igvn->set_type(result, result->bottom_type());
3230   record_for_optimizer(result);
3231   set_map(orig_phi, result);
3232   new_created = true;
3233   return result;
3234 }
3235 
3236 //
3237 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3238 // specified alias index.
3239 //
3240 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
3241   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3242   Compile *C = _compile;
3243   PhaseGVN* igvn = _igvn;
3244   bool new_phi_created;
3245   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3246   if (!new_phi_created) {
3247     return result;
3248   }
3249   GrowableArray<PhiNode *> phi_list;
3250   GrowableArray<uint> cur_input;
3251   PhiNode *phi = orig_phi;
3252   uint idx = 1;
3253   bool finished = false;
3254   while (!finished) {
3255     while (idx < phi->req()) {
3256       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
3257       if (mem != nullptr && mem->is_Phi()) {
3258         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3259         if (new_phi_created) {
3260           // found a Phi for which we created a new split; push the current one
3261           // on the worklist and begin processing the new one
3262           phi_list.push(phi);
3263           cur_input.push(idx);
3264           phi = mem->as_Phi();
3265           result = newphi;
3266           idx = 1;
3267           continue;
3268         } else {
3269           mem = newphi;
3270         }
3271       }
3272       if (C->failing()) {
3273         return nullptr;
3274       }
3275       result->set_req(idx++, mem);
3276     }
3277 #ifdef ASSERT
3278     // verify that the new Phi has an input for each input of the original
3279     assert( phi->req() == result->req(), "must have same number of inputs.");
3280     assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3281 #endif
3282     // Check if all new phi's inputs have specified alias index.
3283     // Otherwise use old phi.
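    // (Note: the phi_list/cur_input stacks above emulate the recursion over
    // nested memory Phis iteratively, so arbitrarily deep Phi chains cannot
    // overflow the C stack.)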
3284 for (uint i = 1; i < phi->req(); i++) { 3285 Node* in = result->in(i); 3286 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond."); 3287 } 3288 // we have finished processing a Phi, see if there are any more to do 3289 finished = (phi_list.length() == 0 ); 3290 if (!finished) { 3291 phi = phi_list.pop(); 3292 idx = cur_input.pop(); 3293 PhiNode *prev_result = get_map_phi(phi->_idx); 3294 prev_result->set_req(idx++, result); 3295 result = prev_result; 3296 } 3297 } 3298 return result; 3299 } 3300 3301 // 3302 // The next methods are derived from methods in MemNode. 3303 // 3304 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 3305 Node *mem = mmem; 3306 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 3307 // means an array I have not precisely typed yet. Do not do any 3308 // alias stuff with it any time soon. 3309 if (toop->base() != Type::AnyPtr && 3310 !(toop->isa_instptr() && 3311 toop->is_instptr()->instance_klass()->is_java_lang_Object() && 3312 toop->offset() == Type::OffsetBot)) { 3313 mem = mmem->memory_at(alias_idx); 3314 // Update input if it is progress over what we have now 3315 } 3316 return mem; 3317 } 3318 3319 // 3320 // Move memory users to their memory slices. 3321 // 3322 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 3323 Compile* C = _compile; 3324 PhaseGVN* igvn = _igvn; 3325 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 3326 assert(tp != nullptr, "ptr type"); 3327 int alias_idx = C->get_alias_index(tp); 3328 int general_idx = C->get_general_index(alias_idx); 3329 3330 // Move users first 3331 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3332 Node* use = n->fast_out(i); 3333 if (use->is_MergeMem()) { 3334 MergeMemNode* mmem = use->as_MergeMem(); 3335 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 3336 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 3337 continue; // Nothing to do 3338 } 3339 // Replace previous general reference to mem node. 3340 uint orig_uniq = C->unique(); 3341 Node* m = find_inst_mem(n, general_idx, orig_phis); 3342 assert(orig_uniq == C->unique(), "no new nodes"); 3343 mmem->set_memory_at(general_idx, m); 3344 --imax; 3345 --i; 3346 } else if (use->is_MemBar()) { 3347 assert(!use->is_Initialize(), "initializing stores should not be moved"); 3348 if (use->req() > MemBarNode::Precedent && 3349 use->in(MemBarNode::Precedent) == n) { 3350 // Don't move related membars. 3351 record_for_optimizer(use); 3352 continue; 3353 } 3354 tp = use->as_MemBar()->adr_type()->isa_ptr(); 3355 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 3356 alias_idx == general_idx) { 3357 continue; // Nothing to do 3358 } 3359 // Move to general memory slice. 3360 uint orig_uniq = C->unique(); 3361 Node* m = find_inst_mem(n, general_idx, orig_phis); 3362 assert(orig_uniq == C->unique(), "no new nodes"); 3363 igvn->hash_delete(use); 3364 imax -= use->replace_edge(n, m, igvn); 3365 igvn->hash_insert(use); 3366 record_for_optimizer(use); 3367 --i; 3368 #ifdef ASSERT 3369 } else if (use->is_Mem()) { 3370 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 3371 // Don't move related cardmark. 3372 continue; 3373 } 3374 // Memory nodes should have new memory input. 
3375       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
3376       assert(tp != nullptr, "ptr type");
3377       int idx = C->get_alias_index(tp);
3378       assert(get_map(use->_idx) != nullptr || idx == alias_idx,
3379              "Following memory nodes should have new memory input or be on the same memory slice");
3380     } else if (use->is_Phi()) {
3381       // Phi nodes should be split and moved already.
3382       tp = use->as_Phi()->adr_type()->isa_ptr();
3383       assert(tp != nullptr, "ptr type");
3384       int idx = C->get_alias_index(tp);
3385       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
3386     } else {
3387       use->dump();
3388       assert(false, "should not be here");
3389 #endif
3390     }
3391   }
3392 }
3393 
3394 //
3395 // Search the memory chain of "orig_mem" to find a MemNode whose address
3396 // matches the specified alias index.
3397 //
3398 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
3399   if (orig_mem == nullptr) {
3400     return orig_mem;
3401   }
3402   Compile* C = _compile;
3403   PhaseGVN* igvn = _igvn;
3404   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
3405   bool is_instance = (toop != nullptr) && toop->is_known_instance();
3406   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
3407   Node *prev = nullptr;
3408   Node *result = orig_mem;
3409   while (prev != result) {
3410     prev = result;
3411     if (result == start_mem) {
3412       break; // hit one of our sentinels
3413     }
3414     if (result->is_Mem()) {
3415       const Type *at = igvn->type(result->in(MemNode::Address));
3416       if (at == Type::TOP) {
3417         break; // Dead
3418       }
3419       assert (at->isa_ptr() != nullptr, "pointer type required.");
3420       int idx = C->get_alias_index(at->is_ptr());
3421       if (idx == alias_idx) {
3422         break; // Found
3423       }
3424       if (!is_instance && (at->isa_oopptr() == nullptr ||
3425                            !at->is_oopptr()->is_known_instance())) {
3426         break; // Do not skip store to general memory slice.
3427       }
3428       result = result->in(MemNode::Memory);
3429     }
3430     if (!is_instance) {
3431       continue; // don't search further for non-instance types
3432     }
3433     // skip over a call which does not affect this memory slice
3434     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
3435       Node *proj_in = result->in(0);
3436       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
3437         break; // hit one of our sentinels
3438       } else if (proj_in->is_Call()) {
3439         // ArrayCopy node processed here as well
3440         CallNode *call = proj_in->as_Call();
3441         if (!call->may_modify(toop, igvn)) {
3442           result = call->in(TypeFunc::Memory);
3443         }
3444       } else if (proj_in->is_Initialize()) {
3445         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
3446         // Stop if this is the initialization for the object instance which
3447         // contains this memory slice, otherwise skip over it.
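        // (The instance_id of a known-instance type is the _idx of its
        // Allocate node, so the identity check below is exact.)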
3448 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { 3449 result = proj_in->in(TypeFunc::Memory); 3450 } 3451 } else if (proj_in->is_MemBar()) { 3452 // Check if there is an array copy for a clone 3453 // Step over GC barrier when ReduceInitialCardMarks is disabled 3454 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 3455 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 3456 3457 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 3458 // Stop if it is a clone 3459 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 3460 if (ac->may_modify(toop, igvn)) { 3461 break; 3462 } 3463 } 3464 result = proj_in->in(TypeFunc::Memory); 3465 } 3466 } else if (result->is_MergeMem()) { 3467 MergeMemNode *mmem = result->as_MergeMem(); 3468 result = step_through_mergemem(mmem, alias_idx, toop); 3469 if (result == mmem->base_memory()) { 3470 // Didn't find instance memory, search through general slice recursively. 3471 result = mmem->memory_at(C->get_general_index(alias_idx)); 3472 result = find_inst_mem(result, alias_idx, orig_phis); 3473 if (C->failing()) { 3474 return nullptr; 3475 } 3476 mmem->set_memory_at(alias_idx, result); 3477 } 3478 } else if (result->is_Phi() && 3479 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 3480 Node *un = result->as_Phi()->unique_input(igvn); 3481 if (un != nullptr) { 3482 orig_phis.append_if_missing(result->as_Phi()); 3483 result = un; 3484 } else { 3485 break; 3486 } 3487 } else if (result->is_ClearArray()) { 3488 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 3489 // Can not bypass initialization of the instance 3490 // we are looking for. 3491 break; 3492 } 3493 // Otherwise skip it (the call updated 'result' value). 3494 } else if (result->Opcode() == Op_SCMemProj) { 3495 Node* mem = result->in(0); 3496 Node* adr = nullptr; 3497 if (mem->is_LoadStore()) { 3498 adr = mem->in(MemNode::Address); 3499 } else { 3500 assert(mem->Opcode() == Op_EncodeISOArray || 3501 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 3502 adr = mem->in(3); // Memory edge corresponds to destination array 3503 } 3504 const Type *at = igvn->type(adr); 3505 if (at != Type::TOP) { 3506 assert(at->isa_ptr() != nullptr, "pointer type required."); 3507 int idx = C->get_alias_index(at->is_ptr()); 3508 if (idx == alias_idx) { 3509 // Assert in debug mode 3510 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 3511 break; // In product mode return SCMemProj node 3512 } 3513 } 3514 result = mem->in(MemNode::Memory); 3515 } else if (result->Opcode() == Op_StrInflatedCopy) { 3516 Node* adr = result->in(3); // Memory edge corresponds to destination array 3517 const Type *at = igvn->type(adr); 3518 if (at != Type::TOP) { 3519 assert(at->isa_ptr() != nullptr, "pointer type required."); 3520 int idx = C->get_alias_index(at->is_ptr()); 3521 if (idx == alias_idx) { 3522 // Assert in debug mode 3523 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 3524 break; // In product mode return SCMemProj node 3525 } 3526 } 3527 result = result->in(MemNode::Memory); 3528 } 3529 } 3530 if (result->is_Phi()) { 3531 PhiNode *mphi = result->as_Phi(); 3532 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 3533 const TypePtr *t = mphi->adr_type(); 3534 if (!is_instance) { 3535 // Push all non-instance Phis on the orig_phis worklist to update inputs 3536 // during Phase 4 if needed. 
3537       orig_phis.append_if_missing(mphi);
3538     } else if (C->get_alias_index(t) != alias_idx) {
3539       // Create a new Phi with the specified alias index type.
3540       result = split_memory_phi(mphi, alias_idx, orig_phis);
3541     }
3542   }
3543   // the result is either MemNode, PhiNode, InitializeNode.
3544   return result;
3545 }
3546 
3547 //
3548 //  Convert the types of non-escaped objects to instance types where possible,
3549 //  propagate the new type information through the graph, and update memory
3550 //  edges and MergeMem inputs to reflect the new type.
3551 //
3552 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
3553 //  The processing is done in 4 phases:
3554 //
3555 //  Phase 1:  Process possible allocations from alloc_worklist. Create instance
3556 //            types for the CheckCastPP for allocations where possible.
3557 //            Propagate the new types through users as follows:
3558 //               casts and Phi:  push users on alloc_worklist
3559 //               AddP:  cast Base and Address inputs to the instance type
3560 //                      push any AddP users on alloc_worklist and push any memnode
3561 //                      users onto memnode_worklist.
3562 //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address type
3563 //            and search the Memory chain for a store with the appropriate
3564 //            address type.  If a Phi is found, create a new version with
3565 //            the appropriate memory slices from each of the Phi inputs.
3566 //            For stores, process the users as follows:
3567 //               MemNode:  push on memnode_worklist
3568 //               MergeMem: push on mergemem_worklist
3569 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
3570 //            moving the first node encountered of each instance type to
3571 //            the input corresponding to its alias index, i.e. to the
3572 //            appropriate memory slice.
3573 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3574 //
3575 // In the following example, the CheckCastPP nodes are the cast of allocation
3576 // results and the allocation of node 29 is non-escaped and eligible to be an
3577 // instance type.
3578 //
3579 // We start with:
3580 //
3581 //     7 Parm #memory
3582 //    10  ConI  "12"
3583 //    19  CheckCastPP   "Foo"
3584 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
3585 //    29  CheckCastPP   "Foo"
3586 //    30  AddP  _ 29 29 10   Foo+12  alias_index=4
3587 //
3588 //    40  StoreP  25   7  20   ... alias_index=4
3589 //    50  StoreP  35  40  30   ... alias_index=4
3590 //    60  StoreP  45  50  20   ... alias_index=4
3591 //    70  LoadP    _  60  30   ... alias_index=4
3592 //    80  Phi     75  50  60   Memory alias_index=4
3593 //    90  LoadP    _  80  30   ... alias_index=4
3594 //   100  LoadP    _  80  20   ... alias_index=4
3595 //
3596 //
3597 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
3598 // and creating a new alias index for node 30. This gives:
3599 //
3600 //     7 Parm #memory
3601 //    10  ConI  "12"
3602 //    19  CheckCastPP   "Foo"
3603 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
3604 //    29  CheckCastPP   "Foo"  iid=24
3605 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
3606 //
3607 //    40  StoreP  25   7  20   ... alias_index=4
3608 //    50  StoreP  35  40  30   ... alias_index=6
3609 //    60  StoreP  45  50  20   ... alias_index=4
3610 //    70  LoadP    _  60  30   ... alias_index=6
3611 //    80  Phi     75  50  60   Memory alias_index=4
3612 //    90  LoadP    _  80  30   ... alias_index=6
3613 //   100  LoadP    _  80  20   ... alias_index=4
3614 //
3615 // In phase 2, new memory inputs are computed for the loads and stores,
3616 // and a new version of the phi is created.  In phase 4, the inputs to
3617 // node 80 are updated and then the memory nodes are updated with the
3618 // values computed in phase 2.  This results in:
3619 //
3620 //     7 Parm #memory
3621 //    10  ConI  "12"
3622 //    19  CheckCastPP   "Foo"
3623 //    20  AddP  _ 19 19 10   Foo+12  alias_index=4
3624 //    29  CheckCastPP   "Foo"  iid=24
3625 //    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
3626 //
3627 //    40  StoreP  25   7  20   ... alias_index=4
3628 //    50  StoreP  35   7  30   ... alias_index=6
3629 //    60  StoreP  45  40  20   ... alias_index=4
3630 //    70  LoadP    _  50  30   ... alias_index=6
3631 //    80  Phi     75  40  60   Memory alias_index=4
3632 //   120  Phi     75  50  50   Memory alias_index=6
3633 //    90  LoadP    _ 120  30   ... alias_index=6
3634 //   100  LoadP    _  80  20   ... alias_index=4
3635 //
3636 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
3637                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
3638                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
3639                                          Unique_Node_List &reducible_merges) {
3640   DEBUG_ONLY(Unique_Node_List reduced_merges;)
3641   GrowableArray<Node *>  memnode_worklist;
3642   GrowableArray<PhiNode *>  orig_phis;
3643   PhaseIterGVN  *igvn = _igvn;
3644   uint new_index_start = (uint) _compile->num_alias_types();
3645   VectorSet visited;
3646   ideal_nodes.clear(); // Reset for use with set_map/get_map.
3647   uint unique_old = _compile->unique();
3648 
3649   //  Phase 1:  Process possible allocations from alloc_worklist.
3650   //  Create instance types for the CheckCastPP for allocations where possible.
3651   //
3652   // (Note: don't forget to change the order of the second AddP node on
3653   //  the alloc_worklist if the order of the worklist processing is changed,
3654   //  see the comment in find_second_addp().)
3655   //
3656   while (alloc_worklist.length() != 0) {
3657     Node *n = alloc_worklist.pop();
3658     uint ni = n->_idx;
3659     if (n->is_Call()) {
3660       CallNode *alloc = n->as_Call();
3661       // copy escape information to call node
3662       PointsToNode* ptn = ptnode_adr(alloc->_idx);
3663       PointsToNode::EscapeState es = ptn->escape_state();
3664       // We have an allocation or call which returns a Java object,
3665       // see if it is non-escaped.
3666       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
3667         continue;
3668       }
3669       // Find CheckCastPP for the allocate or for the return value of a call
3670       n = alloc->result_cast();
3671       if (n == nullptr) {             // No uses except Initialize node
3672         if (alloc->is_Allocate()) {
3673           // Set the scalar_replaceable flag for allocation
3674           // so it could be eliminated if it has no uses.
3675           alloc->as_Allocate()->_is_scalar_replaceable = true;
3676         }
3677         continue;
3678       }
3679       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
3680         // We could reach here for an Allocate if one init is associated with many allocs.
3681         if (alloc->is_Allocate()) {
3682           alloc->as_Allocate()->_is_scalar_replaceable = false;
3683         }
3684         continue;
3685       }
3686 
3687       // The inline code for Object.clone() casts the allocation result to
3688       // java.lang.Object and then to the actual type of the allocated
3689       // object. Detect this case and use the second cast.
3690       // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
3691       // the allocation result is cast to java.lang.Object and then
3692       // to the actual Array type.
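      // For example (hypothetical Java sources for the two shapes):
      //   Foo[] b = a.clone();                     // cast to Object, then to Foo[]
      //   Object c = Array.newInstance(cls, len);  // cast to Object only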
3693       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3694           && (alloc->is_AllocateArray() ||
3695               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
3696         Node *cast2 = nullptr;
3697         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3698           Node *use = n->fast_out(i);
3699           if (use->is_CheckCastPP()) {
3700             cast2 = use;
3701             break;
3702           }
3703         }
3704         if (cast2 != nullptr) {
3705           n = cast2;
3706         } else {
3707           // Non-scalar replaceable if the allocation type is unknown statically
3708           // (reflection allocation); the object can't be restored during
3709           // deoptimization without a precise type.
3710           continue;
3711         }
3712       }
3713 
3714       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3715       if (t == nullptr) {
3716         continue; // not a TypeOopPtr
3717       }
3718       if (!t->klass_is_exact()) {
3719         continue; // not a unique type
3720       }
3721       if (alloc->is_Allocate()) {
3722         // Set the scalar_replaceable flag for allocation
3723         // so it could be eliminated.
3724         alloc->as_Allocate()->_is_scalar_replaceable = true;
3725       }
3726       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
3727       // in order for an object to be scalar-replaceable, it must be:
3728       //   - a direct allocation (not a call returning an object)
3729       //   - non-escaping
3730       //   - eligible to be a unique type
3731       //   - not determined to be ineligible by escape analysis
3732       set_map(alloc, n);
3733       set_map(n, alloc);
3734       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3735       igvn->hash_delete(n);
3736       igvn->set_type(n, tinst);
3737       n->raise_bottom_type(tinst);
3738       igvn->hash_insert(n);
3739       record_for_optimizer(n);
3740       // Allocate an alias index for the header fields. Accesses to
3741       // the header emitted during macro expansion wouldn't have
3742       // correct memory state otherwise.
3743       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3744       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3745       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3746 
3747         // First, put on the worklist all Field edges from the Connection Graph,
3748         // which is more accurate than putting the immediate users from the Ideal Graph.
3749         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3750           PointsToNode* tgt = e.get();
3751           if (tgt->is_Arraycopy()) {
3752             continue;
3753           }
3754           Node* use = tgt->ideal_node();
3755           assert(tgt->is_Field() && use->is_AddP(),
3756                  "only AddP nodes are Field edges in CG");
3757           if (use->outcnt() > 0) { // Don't process dead nodes
3758             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3759             if (addp2 != nullptr) {
3760               assert(alloc->is_AllocateArray(), "array allocation was expected");
3761               alloc_worklist.append_if_missing(addp2);
3762             }
3763             alloc_worklist.append_if_missing(use);
3764           }
3765         }
3766 
3767         // An allocation may have an Initialize which has raw stores. Scan
3768         // the users of the raw allocation result and push AddP users
3769         // on alloc_worklist.
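        // For example, a field store captured by the Initialize keeps its raw
        // shape (AddP case #3 above):
        //   StoreX(ctl, mem, AddP(top, Proj#5(Allocate), #off), val)
        // so such AddPs are reachable only from the raw allocation result.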
3770         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3771         assert(raw_result != nullptr, "must have an allocation result");
3772         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3773           Node *use = raw_result->fast_out(i);
3774           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3775             Node* addp2 = find_second_addp(use, raw_result);
3776             if (addp2 != nullptr) {
3777               assert(alloc->is_AllocateArray(), "array allocation was expected");
3778               alloc_worklist.append_if_missing(addp2);
3779             }
3780             alloc_worklist.append_if_missing(use);
3781           } else if (use->is_MemBar()) {
3782             memnode_worklist.append_if_missing(use);
3783           }
3784         }
3785       }
3786     } else if (n->is_AddP()) {
3787       Node* addp_base = get_addp_base(n);
3788       if (addp_base != nullptr && reducible_merges.member(addp_base)) {
3789         // This AddP will go away when we reduce the Phi
3790         continue;
3791       }
3792       JavaObjectNode* jobj = unique_java_object(addp_base);
3793       if (jobj == nullptr || jobj == phantom_obj) {
3794 #ifdef ASSERT
3795         ptnode_adr(get_addp_base(n)->_idx)->dump();
3796         ptnode_adr(n->_idx)->dump();
3797         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3798 #endif
3799         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3800         return;
3801       }
3802       Node *base = get_map(jobj->idx());  // CheckCastPP node
3803       if (!split_AddP(n, base)) continue; // wrong type from dead path
3804     } else if (n->is_Phi() ||
3805                n->is_CheckCastPP() ||
3806                n->is_EncodeP() ||
3807                n->is_DecodeN() ||
3808                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3809       if (visited.test_set(n->_idx)) {
3810         assert(n->is_Phi(), "loops only through Phi's");
3811         continue; // already processed
3812       }
3813       // Reducible Phi's will be removed from the graph after split_unique_types finishes
3814       if (reducible_merges.member(n)) {
3815         // Split loads through phi
3816         reduce_phi_on_field_access(n->as_Phi(), alloc_worklist);
3817 #ifdef ASSERT
3818         if (VerifyReduceAllocationMerges) {
3819           reduced_merges.push(n);
3820         }
3821 #endif
3822         continue;
3823       }
3824       JavaObjectNode* jobj = unique_java_object(n);
3825       if (jobj == nullptr || jobj == phantom_obj) {
3826 #ifdef ASSERT
3827         ptnode_adr(n->_idx)->dump();
3828         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3829 #endif
3830         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3831         return;
3832       } else {
3833         Node *val = get_map(jobj->idx());   // CheckCastPP node
3834         TypeNode *tn = n->as_Type();
3835         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
3836         assert(tinst != nullptr && tinst->is_known_instance() &&
3837                tinst->instance_id() == jobj->idx(), "instance type expected.");
3838 
3839         const Type *tn_type = igvn->type(tn);
3840         const TypeOopPtr *tn_t;
3841         if (tn_type->isa_narrowoop()) {
3842           tn_t = tn_type->make_ptr()->isa_oopptr();
3843         } else {
3844           tn_t = tn_type->isa_oopptr();
3845         }
3846         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
3847           if (tn_type->isa_narrowoop()) {
3848             tn_type = tinst->make_narrowoop();
3849           } else {
3850             tn_type = tinst;
3851           }
3852           igvn->hash_delete(tn);
3853           igvn->set_type(tn, tn_type);
3854           tn->set_type(tn_type);
3855           igvn->hash_insert(tn);
3856           record_for_optimizer(n);
3857         } else {
3858           assert(tn_type == TypePtr::NULL_PTR ||
3859                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
3860                  "unexpected type");
3861           continue; // Skip dead path with different type
3862         }
3863       }
3864     } else {
3865       debug_only(n->dump();)
3866       assert(false, "EA: unexpected node");
3867       continue;
3868     }
3869     // push allocation's users on appropriate worklist
3870     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3871       Node *use = n->fast_out(i);
3872       if (use->is_Mem() && use->in(MemNode::Address) == n) {
3873         // Load/store to instance's field
3874         memnode_worklist.append_if_missing(use);
3875       } else if (use->is_MemBar()) {
3876         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3877           memnode_worklist.append_if_missing(use);
3878         }
3879       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
3880         Node* addp2 = find_second_addp(use, n);
3881         if (addp2 != nullptr) {
3882           alloc_worklist.append_if_missing(addp2);
3883         }
3884         alloc_worklist.append_if_missing(use);
3885       } else if (use->is_Phi() ||
3886                  use->is_CheckCastPP() ||
3887                  use->is_EncodeNarrowPtr() ||
3888                  use->is_DecodeNarrowPtr() ||
3889                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
3890         alloc_worklist.append_if_missing(use);
3891 #ifdef ASSERT
3892       } else if (use->is_Mem()) {
3893         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3894       } else if (use->is_MergeMem()) {
3895         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3896       } else if (use->is_SafePoint()) {
3897         // Look for MergeMem nodes for calls which reference unique allocation
3898         // (through CheckCastPP nodes) even for debug info.
3899         Node* m = use->in(TypeFunc::Memory);
3900         if (m->is_MergeMem()) {
3901           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3902         }
3903       } else if (use->Opcode() == Op_EncodeISOArray) {
3904         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3905           // EncodeISOArray overwrites destination array
3906           memnode_worklist.append_if_missing(use);
3907         }
3908       } else {
3909         uint op = use->Opcode();
3910         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3911             (use->in(MemNode::Memory) == n)) {
3912           // They overwrite the memory edge corresponding to the destination array.
3913           memnode_worklist.append_if_missing(use);
3914         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3915                      op == Op_CastP2X || op == Op_StoreCM ||
3916                      op == Op_FastLock || op == Op_AryEq ||
3917                      op == Op_StrComp || op == Op_CountPositives ||
3918                      op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3919                      op == Op_StrEquals || op == Op_VectorizedHashCode ||
3920                      op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3921                      op == Op_SubTypeCheck ||
3922                      BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3923           n->dump();
3924           use->dump();
3925           assert(false, "EA: missing allocation reference path");
3926         }
3927 #endif
3928       }
3929     }
3930 
3931   }
3932 
3933 #ifdef ASSERT
3934   if (VerifyReduceAllocationMerges) {
3935     // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints.
3936     for (uint i = 0; i < reducible_merges.size(); i++) {
3937       Node* phi = reducible_merges.at(i);
3938 
3939       if (!reduced_merges.member(phi)) {
3940         phi->dump(2);
3941         phi->dump(-2);
3942         assert(false, "This reducible merge wasn't reduced.");
3943       }
3944 
3945       for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
3946         Node* use = phi->fast_out(j);
3947         if (!use->is_SafePoint()) {
3948           phi->dump(2);
3949           phi->dump(-2);
3950           assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
3951         }
3952       }
3953     }
3954   }
3955 #endif
3956 
3957   // Go over all ArrayCopy nodes and if one of the inputs has a unique
3958   // type, record it in the ArrayCopy node so we know what memory this
3959   // node uses/modifies.
3960   for (int next = 0; next < arraycopy_worklist.length(); next++) {
3961     ArrayCopyNode* ac = arraycopy_worklist.at(next);
3962     Node* dest = ac->in(ArrayCopyNode::Dest);
3963     if (dest->is_AddP()) {
3964       dest = get_addp_base(dest);
3965     }
3966     JavaObjectNode* jobj = unique_java_object(dest);
3967     if (jobj != nullptr) {
3968       Node *base = get_map(jobj->idx());
3969       if (base != nullptr) {
3970         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3971         ac->_dest_type = base_t;
3972       }
3973     }
3974     Node* src = ac->in(ArrayCopyNode::Src);
3975     if (src->is_AddP()) {
3976       src = get_addp_base(src);
3977     }
3978     jobj = unique_java_object(src);
3979     if (jobj != nullptr) {
3980       Node* base = get_map(jobj->idx());
3981       if (base != nullptr) {
3982         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3983         ac->_src_type = base_t;
3984       }
3985     }
3986   }
3987 
3988   // New alias types were created in split_AddP().
3989   uint new_index_end = (uint) _compile->num_alias_types();
3990 
3991   //  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
3992   //            type and compute new values for the Memory inputs (the Memory
3993   //            inputs are not actually updated until Phase 4).
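  //            For example, for "60 StoreP 45 50 20" in the graphs above, the
  //            walk 50 -> 40 over the instance slice yields node 40, which is
  //            remembered via set_map() here and installed in Phase 4.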
3994   if (memnode_worklist.length() == 0)
3995     return; // nothing to do
3996   while (memnode_worklist.length() != 0) {
3997     Node *n = memnode_worklist.pop();
3998     if (visited.test_set(n->_idx)) {
3999       continue;
4000     }
4001     if (n->is_Phi() || n->is_ClearArray()) {
4002       // we don't need to do anything, but the users must be pushed
4003     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4004       // we don't need to do anything, but the users must be pushed
4005       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4006       if (n == nullptr) {
4007         continue;
4008       }
4009     } else if (n->Opcode() == Op_StrCompressedCopy ||
4010                n->Opcode() == Op_EncodeISOArray) {
4011       // get the memory projection
4012       n = n->find_out_with(Op_SCMemProj);
4013       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4014     } else {
4015       assert(n->is_Mem(), "memory node required.");
4016       Node *addr = n->in(MemNode::Address);
4017       const Type *addr_t = igvn->type(addr);
4018       if (addr_t == Type::TOP) {
4019         continue;
4020       }
4021       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4022       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4023       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4024       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4025       if (_compile->failing()) {
4026         return;
4027       }
4028       if (mem != n->in(MemNode::Memory)) {
4029         // We delay the memory edge update since we need the old one in
4030         // the MergeMem code below when instance memory slices are separated.
4031         set_map(n, mem);
4032       }
4033       if (n->is_Load()) {
4034         continue; // don't push users
4035       } else if (n->is_LoadStore()) {
4036         // get the memory projection
4037         n = n->find_out_with(Op_SCMemProj);
4038         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4039       }
4040     }
4041     // push user on appropriate worklist
4042     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4043       Node *use = n->fast_out(i);
4044       if (use->is_Phi() || use->is_ClearArray()) {
4045         memnode_worklist.append_if_missing(use);
4046       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4047         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4048           continue;
4049         }
4050         memnode_worklist.append_if_missing(use);
4051       } else if (use->is_MemBar()) {
4052         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4053           memnode_worklist.append_if_missing(use);
4054         }
4055 #ifdef ASSERT
4056       } else if (use->is_Mem()) {
4057         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4058       } else if (use->is_MergeMem()) {
4059         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4060       } else if (use->Opcode() == Op_EncodeISOArray) {
4061         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4062           // EncodeISOArray overwrites destination array
4063           memnode_worklist.append_if_missing(use);
4064         }
4065       } else {
4066         uint op = use->Opcode();
4067         if ((use->in(MemNode::Memory) == n) &&
4068             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4069           // They overwrite the memory edge corresponding to the destination array.
4070           memnode_worklist.append_if_missing(use);
4071         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4072                      op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4073                      op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4074                      op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4075           n->dump();
4076           use->dump();
4077           assert(false, "EA: missing memory path");
4078         }
4079 #endif
4080       }
4081     }
4082   }
4083 
4084   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4085   //            Walk each memory slice moving the first node encountered of each
4086   //            instance type to the input corresponding to its alias index.
4087   uint length = mergemem_worklist.length();
4088   for (uint next = 0; next < length; ++next) {
4089     MergeMemNode* nmm = mergemem_worklist.at(next);
4090     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4091     // Note: we don't want to use MergeMemStream here because we only want to
4092     // scan inputs which exist at the start, not ones we add during processing.
4093     // Note 2: MergeMem may already contain instance memory slices added
4094     // during find_inst_mem() call when memory nodes were processed above.
4095     igvn->hash_delete(nmm);
4096     uint nslices = MIN2(nmm->req(), new_index_start);
4097     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4098       Node* mem = nmm->in(i);
4099       Node* cur = nullptr;
4100       if (mem == nullptr || mem->is_top()) {
4101         continue;
4102       }
4103       // First, update mergemem by moving memory nodes to corresponding slices
4104       // if their type became more precise since this mergemem was created.
4105       while (mem->is_Mem()) {
4106         const Type *at = igvn->type(mem->in(MemNode::Address));
4107         if (at != Type::TOP) {
4108           assert (at->isa_ptr() != nullptr, "pointer type required.");
4109           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4110           if (idx == i) {
4111             if (cur == nullptr) {
4112               cur = mem;
4113             }
4114           } else {
4115             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4116               nmm->set_memory_at(idx, mem);
4117             }
4118           }
4119         }
4120         mem = mem->in(MemNode::Memory);
4121       }
4122       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4123       // Find any instance of the current type if we haven't already encountered
4124       // a memory slice of the instance along the memory chain.
4125       for (uint ni = new_index_start; ni < new_index_end; ni++) {
4126         if ((uint)_compile->get_general_index(ni) == i) {
4127           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4128           if (nmm->is_empty_memory(m)) {
4129             Node* result = find_inst_mem(mem, ni, orig_phis);
4130             if (_compile->failing()) {
4131               return;
4132             }
4133             nmm->set_memory_at(ni, result);
4134           }
4135         }
4136       }
4137     }
4138     // Find the rest of the instance values
4139     for (uint ni = new_index_start; ni < new_index_end; ni++) {
4140       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4141       Node* result = step_through_mergemem(nmm, ni, tinst);
4142       if (result == nmm->base_memory()) {
4143         // Didn't find instance memory, search through general slice recursively.
4144         result = nmm->memory_at(_compile->get_general_index(ni));
4145         result = find_inst_mem(result, ni, orig_phis);
4146         if (_compile->failing()) {
4147           return;
4148         }
4149         nmm->set_memory_at(ni, result);
4150       }
4151     }
4152     igvn->hash_insert(nmm);
4153     record_for_optimizer(nmm);
4154   }
4155 
4156   //  Phase 4:  Update the inputs of non-instance memory Phis and
4157   //            the Memory input of memnodes.
4158   // First update the inputs of any non-instance Phi's from
4159   // which we split out an instance Phi.  Note we don't have
4160   // to recursively process Phi's encountered on the input memory
4161   // chains as is done in split_memory_phi() since they will
4162   // also be processed here.
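  // For example, in the graphs above this rewires "80 Phi 75 50 60" to
  // "80 Phi 75 40 60": input 50 now lives on the new instance slice, so
  // find_inst_mem() skips it and returns node 40, the preceding store on
  // the requested general slice.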
4163 for (int j = 0; j < orig_phis.length(); j++) { 4164 PhiNode *phi = orig_phis.at(j); 4165 int alias_idx = _compile->get_alias_index(phi->adr_type()); 4166 igvn->hash_delete(phi); 4167 for (uint i = 1; i < phi->req(); i++) { 4168 Node *mem = phi->in(i); 4169 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 4170 if (_compile->failing()) { 4171 return; 4172 } 4173 if (mem != new_mem) { 4174 phi->set_req(i, new_mem); 4175 } 4176 } 4177 igvn->hash_insert(phi); 4178 record_for_optimizer(phi); 4179 } 4180 4181 // Update the memory inputs of MemNodes with the value we computed 4182 // in Phase 2 and move stores memory users to corresponding memory slices. 4183 // Disable memory split verification code until the fix for 6984348. 4184 // Currently it produces false negative results since it does not cover all cases. 4185 #if 0 // ifdef ASSERT 4186 visited.Reset(); 4187 Node_Stack old_mems(arena, _compile->unique() >> 2); 4188 #endif 4189 for (uint i = 0; i < ideal_nodes.size(); i++) { 4190 Node* n = ideal_nodes.at(i); 4191 Node* nmem = get_map(n->_idx); 4192 assert(nmem != nullptr, "sanity"); 4193 if (n->is_Mem()) { 4194 #if 0 // ifdef ASSERT 4195 Node* old_mem = n->in(MemNode::Memory); 4196 if (!visited.test_set(old_mem->_idx)) { 4197 old_mems.push(old_mem, old_mem->outcnt()); 4198 } 4199 #endif 4200 assert(n->in(MemNode::Memory) != nmem, "sanity"); 4201 if (!n->is_Load()) { 4202 // Move memory users of a store first. 4203 move_inst_mem(n, orig_phis); 4204 } 4205 // Now update memory input 4206 igvn->hash_delete(n); 4207 n->set_req(MemNode::Memory, nmem); 4208 igvn->hash_insert(n); 4209 record_for_optimizer(n); 4210 } else { 4211 assert(n->is_Allocate() || n->is_CheckCastPP() || 4212 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 4213 } 4214 } 4215 #if 0 // ifdef ASSERT 4216 // Verify that memory was split correctly 4217 while (old_mems.is_nonempty()) { 4218 Node* old_mem = old_mems.node(); 4219 uint old_cnt = old_mems.index(); 4220 old_mems.pop(); 4221 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 4222 } 4223 #endif 4224 } 4225 4226 #ifndef PRODUCT 4227 int ConnectionGraph::_no_escape_counter = 0; 4228 int ConnectionGraph::_arg_escape_counter = 0; 4229 int ConnectionGraph::_global_escape_counter = 0; 4230 4231 static const char *node_type_names[] = { 4232 "UnknownType", 4233 "JavaObject", 4234 "LocalVar", 4235 "Field", 4236 "Arraycopy" 4237 }; 4238 4239 static const char *esc_names[] = { 4240 "UnknownEscape", 4241 "NoEscape", 4242 "ArgEscape", 4243 "GlobalEscape" 4244 }; 4245 4246 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 4247 NodeType nt = node_type(); 4248 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 4249 if (print_state) { 4250 EscapeState es = escape_state(); 4251 EscapeState fields_es = fields_escape_state(); 4252 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 4253 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 4254 out->print("NSR "); 4255 } 4256 } 4257 } 4258 4259 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 4260 dump_header(print_state, out); 4261 if (is_Field()) { 4262 FieldNode* f = (FieldNode*)this; 4263 if (f->is_oop()) { 4264 out->print("oop "); 4265 } 4266 if (f->offset() > 0) { 4267 out->print("+%d ", f->offset()); 4268 } 4269 out->print("("); 4270 for (BaseIterator i(f); i.has_next(); i.next()) { 4271 PointsToNode* b = i.get(); 4272 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? 
"P" : "")); 4273 } 4274 out->print(" )"); 4275 } 4276 out->print("["); 4277 for (EdgeIterator i(this); i.has_next(); i.next()) { 4278 PointsToNode* e = i.get(); 4279 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 4280 } 4281 out->print(" ["); 4282 for (UseIterator i(this); i.has_next(); i.next()) { 4283 PointsToNode* u = i.get(); 4284 bool is_base = false; 4285 if (PointsToNode::is_base_use(u)) { 4286 is_base = true; 4287 u = PointsToNode::get_use_node(u)->as_Field(); 4288 } 4289 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 4290 } 4291 out->print(" ]] "); 4292 if (_node == nullptr) { 4293 out->print("<null>%s", newline ? "\n" : ""); 4294 } else { 4295 _node->dump(newline ? "\n" : "", false, out); 4296 } 4297 } 4298 4299 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 4300 bool first = true; 4301 int ptnodes_length = ptnodes_worklist.length(); 4302 for (int i = 0; i < ptnodes_length; i++) { 4303 PointsToNode *ptn = ptnodes_worklist.at(i); 4304 if (ptn == nullptr || !ptn->is_JavaObject()) { 4305 continue; 4306 } 4307 PointsToNode::EscapeState es = ptn->escape_state(); 4308 if ((es != PointsToNode::NoEscape) && !Verbose) { 4309 continue; 4310 } 4311 Node* n = ptn->ideal_node(); 4312 if (n->is_Allocate() || (n->is_CallStaticJava() && 4313 n->as_CallStaticJava()->is_boxing_method())) { 4314 if (first) { 4315 tty->cr(); 4316 tty->print("======== Connection graph for "); 4317 _compile->method()->print_short_name(); 4318 tty->cr(); 4319 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 4320 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 4321 tty->cr(); 4322 first = false; 4323 } 4324 ptn->dump(); 4325 // Print all locals and fields which reference this allocation 4326 for (UseIterator j(ptn); j.has_next(); j.next()) { 4327 PointsToNode* use = j.get(); 4328 if (use->is_LocalVar()) { 4329 use->dump(Verbose); 4330 } else if (Verbose) { 4331 use->dump(); 4332 } 4333 } 4334 tty->cr(); 4335 } 4336 } 4337 } 4338 4339 void ConnectionGraph::print_statistics() { 4340 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 4341 } 4342 4343 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 4344 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 4345 return; 4346 } 4347 for (int next = 0; next < java_objects_worklist.length(); ++next) { 4348 JavaObjectNode* ptn = java_objects_worklist.at(next); 4349 if (ptn->ideal_node()->is_Allocate()) { 4350 if (ptn->escape_state() == PointsToNode::NoEscape) { 4351 Atomic::inc(&ConnectionGraph::_no_escape_counter); 4352 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 4353 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 4354 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 4355 Atomic::inc(&ConnectionGraph::_global_escape_counter); 4356 } else { 4357 assert(false, "Unexpected Escape State"); 4358 } 4359 } 4360 } 4361 } 4362 4363 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 4364 if (_compile->directive()->TraceEscapeAnalysisOption) { 4365 assert(ptn != nullptr, "should not be null"); 4366 
assert(reason != nullptr, "should not be null"); 4367 ptn->dump_header(true); 4368 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 4369 PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state(); 4370 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 4371 } 4372 } 4373 4374 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 4375 if (_compile->directive()->TraceEscapeAnalysisOption) { 4376 stringStream ss; 4377 ss.print("propagated from: "); 4378 from->dump(true, &ss, false); 4379 return ss.as_string(); 4380 } else { 4381 return nullptr; 4382 } 4383 } 4384 4385 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 4386 if (_compile->directive()->TraceEscapeAnalysisOption) { 4387 stringStream ss; 4388 ss.print("escapes as arg to:"); 4389 call->dump("", false, &ss); 4390 return ss.as_string(); 4391 } else { 4392 return nullptr; 4393 } 4394 } 4395 4396 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 4397 if (_compile->directive()->TraceEscapeAnalysisOption) { 4398 stringStream ss; 4399 ss.print("is merged with other object: "); 4400 other->dump_header(true, &ss); 4401 return ss.as_string(); 4402 } else { 4403 return nullptr; 4404 } 4405 } 4406 4407 #endif 4408 4409 void ConnectionGraph::record_for_optimizer(Node *n) { 4410 _igvn->_worklist.push(n); 4411 _igvn->add_users_to_worklist(n); 4412 }