/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
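  //
  // Illustrative Java shapes that produce such candidates (hypothetical
  // examples, not taken from this file):
  //   Point p = new Point(x, y);        // Allocate node
  //   synchronized (localObj) { ... }   // Lock on a non-Parm, non-Con object
  //   Integer b = Integer.valueOf(x);   // boxing method call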
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // A Load/Store at the mark word address is at offset 0 so has no AddP, which confuses EA.
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node_List.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect pointer-compare nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that, depending on the
        // escape status of the associated Allocate node, some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of their inputs
        // is non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, which is what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
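  //    For example (illustrative): a CmpP of pointers to two distinct
  //    non-escaping allocations can be folded to a constant, and a
  //    MemBarStoreStore attached to a non-escaping Allocate can be removed.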
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Remove reducible allocation merges from ideal graph
  if (reducible_merges.size() > 0) {
    bool delay = _igvn->delay_transform();
    _igvn->set_delay_transform(true);
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      reduce_phi(n->as_Phi());
      if (C->failing()) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        return false;
      }
    }
    _igvn->set_delay_transform(delay);
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter.
// Returns true if at least one scalar replaceable allocation participates
// in the merge and no input to the Phi is nullable.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  // Check if there is a scalar replaceable allocate in the Phi
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    // Right now we can't restore a "null" pointer during deoptimization
    const Type* inp_t = _igvn->type(ophi->in(i));
    if (inp_t == nullptr || inp_t->make_oopptr() == nullptr || inp_t->make_oopptr()->maybe_null()) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Input %d is nullable.", ophi->_idx, _invocation, i);)
      return false;
    }

    // We are looking for at least one SR object in the merge
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      assert(ptn->ideal_node() != nullptr && ptn->ideal_node()->is_Allocate(), "sanity");
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// Check if we are able to untangle the merge. Right now we only reduce Phis
// which are only used as debug information.
bool ConnectionGraph::can_reduce_phi_check_users(PhiNode* ophi) const {
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(ophi)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", ophi->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", ophi->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", ophi->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_phi_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with
  // ReduceAllocationMerges (RAM) disabled.
  // If EliminateAllocations is false, there is no point in reducing merges.
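  //
  // A reducible merge has roughly this Java shape (an illustrative,
  // hypothetical example):
  //
  //   Point p = cond ? new Point(1, 1) : new Point(2, 2);
  //   ... // 'p' is only kept alive in debug info (safepoints)
  //
  // where at least one input allocation is scalar replaceable and no input
  // may be null.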
  if (!_compile->do_reduce_allocation_merges()) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr || phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_instptr() == nullptr ||
      !phi_t->make_ptr()->isa_instptr()->klass_is_exact()) {
    NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can NOT reduce Phi %d during invocation %d because it's nullable.", ophi->_idx, _invocation); })
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_phi_check_users(ophi)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

void ConnectionGraph::reduce_phi_on_field_access(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

#ifdef ASSERT
  if (VerifyReduceAllocationMerges && !can_reduce_phi(ophi)) {
    TraceReduceAllocationMerges = true;
    ophi->dump(2);
    ophi->dump(-2);
    assert(can_reduce_phi(ophi), "Sanity: previous reducible Phi is no longer reducible inside reduce_phi_on_field_access.");
  }
#endif

  // Iterate over Phi outputs looking for an AddP
  for (int j = ophi->outcnt()-1; j >= 0;) {
    Node* previous_addp = ophi->raw_out(j);
    if (previous_addp->is_AddP()) {
      // All AddPs are present in the connection graph
      FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

      // Iterate over AddP looking for a Load
      for (int k = previous_addp->outcnt()-1; k >= 0;) {
        Node* previous_load = previous_addp->raw_out(k);
        if (previous_load->is_Load()) {
          Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);
          assert(data_phi != nullptr, "Output of split_through_phi is null.");
          assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
          assert(data_phi->is_Phi(), "Return of split_through_phi should be a Phi.");
          _igvn->replace_node(previous_load, data_phi);

          // Push the newly created AddP on alloc_worklist and patch
          // the connection graph. Note that the changes in the CG below
          // won't affect the ES of objects since the new nodes have the
          // same status as the old ones.
          for (uint i = 1; i < data_phi->req(); i++) {
            Node* new_load = data_phi->in(i);
            if (new_load->is_Load()) {
              Node* new_addp = new_load->in(MemNode::Address);
              Node* base = get_addp_base(new_addp);

              // The base might not be something that we can create a unique
              // type for. If that's the case we are done with that input.
              PointsToNode* jobj_ptn = unique_java_object(base);
              if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
                continue;
              }

              // Push to alloc_worklist since the base has a unique type
              alloc_worklist.append_if_missing(new_addp);

              // Now let's add the node to the connection graph
              _nodes.at_grow(new_addp->_idx, nullptr);
              add_field(new_addp, fn->escape_state(), fn->offset());
              add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

              // If the load doesn't load an object then it won't be
              // part of the connection graph
              PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
              if (curr_load_ptn != nullptr) {
                _nodes.at_grow(new_load->_idx, nullptr);
                add_local_var(new_load, curr_load_ptn->escape_state());
                add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
              }
            }
          }
        }
        k = MIN2(--k, (int)previous_addp->outcnt()-1);
      }

      // Remove the old AddP from the processing list because it's dead now
      alloc_worklist.remove_if_existing(previous_addp);
      _igvn->remove_globally_dead_node(previous_addp);
    }
    j = MIN2(--j, (int)ophi->outcnt()-1);
  }

#ifdef ASSERT
  if (VerifyReduceAllocationMerges) {
    for (uint j = 0; j < ophi->outcnt(); j++) {
      Node* use = ophi->raw_out(j);
      if (!use->is_SafePoint()) {
        ophi->dump(2);
        ophi->dump(-2);
        assert(false, "Should be a SafePoint.");
      }
    }
  }
#endif
}

// This method will create a SafePointScalarObjectNode for each combination of
// scalar replaceable allocation in 'ophi' and SafePoint node in 'safepoints'.
// The method will create a SafePointScalarMergeNode for each combination of
// 'ophi' and SafePoint node in 'safepoints'.
// Each SafePointScalarMergeNode created here may describe multiple scalar
// replaced objects - check detailed description in SafePointScalarMergeNode
// class header.
//
// This method will set entries in the Phi that are scalar replaceable to 'null'.
void ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi, Unique_Node_List* safepoints) {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  uint number_of_sr_objects = 0;
  PhaseMacroExpand mexp(*_igvn);

  _igvn->hash_delete(ophi);

  // Fill in the 'selector' Phi. If index 'i' of the selector is:
  // -> a '-1' constant, the i'th input of the original Phi is NSR.
  // -> a 'x' constant >= 0, the i'th input of the original Phi will be SR and
  //    the info about the scalarized object will be at index x of
  //    ObjectMergeValue::possible_objects
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  // Update the debug information of all safepoints in turn
  for (uint spi = 0; spi < safepoints->size(); spi++) {
    SafePointNode* sfpt = safepoints->at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(ophi);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register
      // information about it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      Unique_Node_List value_worklist;
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
      guarantee(value_worklist.size() == 0, "Unimplemented: Valhalla support for 8287061");
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), ophi);
    _igvn->_worklist.push(sfpt);
  }

  // Now we can change ophi since we don't need to know the types
  // of the input allocations anymore.
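  //
  // Illustrative result (hypothetical shape): for 'ophi = Phi(R, alloc1, nsr_ptr)'
  // where 'alloc1' is scalar replaceable, the selector is 'Phi(R, 0, -1)',
  // the new Phi built below becomes 'Phi(R, null, nsr_ptr)', and
  // deoptimization rematerializes input 1 from its SafePointScalarObjectNode.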
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  _igvn->replace_node(ophi, new_phi);
  _igvn->hash_insert(ophi);
  _igvn->_worklist.push(ophi);
}

void ConnectionGraph::reduce_phi(PhiNode* ophi) {
  Unique_Node_List safepoints;

  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);

    // All SafePoint nodes using the same Phi node use the same debug
    // information (regarding the Phi). Furthermore, reducing the Phi used by a
    // SafePoint requires changing the Phi. Therefore, we collect all safepoints
    // and patch them all at once later.
    if (use->is_SafePoint()) {
      safepoints.push(use->as_SafePoint());
    } else {
#ifdef ASSERT
      ophi->dump(-3);
      assert(false, "Unexpected user of reducible Phi %d -> %d:%s", ophi->_idx, use->_idx, use->Name());
#endif
      _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
      return;
    }
  }

  if (safepoints.size() > 0) {
    reduce_phi_on_safepoints(ophi, &safepoints);
  }
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  if (!C->do_reduce_allocation_merges()) return;

  Unique_Node_List ideal_nodes;
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain_sig();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}


// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
        bool returns_oop = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
          ProjNode* pn = n->fast_out(i)->as_Proj();
          if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
            returns_oop = true;
          }
        }
        if (returns_oop) {
          add_call_node(n->as_Call());
        }
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some inputs may
      // not be defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      PointsToNode* ptn_con = add_java_object(n, es);
      set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != nullptr) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some inputs may
        // not be defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_VectorizedHashCode:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
      set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during the first iteration because some inputs may
      // not be defined yet; defer to the final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != nullptr, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
             n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_VectorizedHashCode:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != nullptr, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != nullptr, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
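      // e.g. (illustrative): a JMH-style 'blackhole.consume(obj)' must keep
      // 'obj' alive as if it escaped, so it can never be scalar replaced.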
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == nullptr) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == nullptr) {
    n->dump(1);
    assert(adr_type != nullptr, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == nullptr) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0 ||
             strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
      if (es == PointsToNode::GlobalEscape) {
        set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
      }
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_cc();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != nullptr &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
1526 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
1527 map_ideal_node(call, phantom_obj);
1528 }
1529 }
1530
1531 void ConnectionGraph::process_call_arguments(CallNode *call) {
1532 bool is_arraycopy = false;
1533 switch (call->Opcode()) {
1534 #ifdef ASSERT
1535 case Op_Allocate:
1536 case Op_AllocateArray:
1537 case Op_Lock:
1538 case Op_Unlock:
1539 assert(false, "should be done already");
1540 break;
1541 #endif
1542 case Op_ArrayCopy:
1543 case Op_CallLeafNoFP:
1544 // Most array copies are ArrayCopy nodes at this point but there
1545 // are still a few direct calls to the copy subroutines (see
1546 // PhaseStringOpts::copy_string()).
1547 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
1548 call->as_CallLeaf()->is_call_to_arraycopystub();
1549 // fall through
1550 case Op_CallLeafVector:
1551 case Op_CallLeaf: {
1552 // Stub calls: objects do not escape but they are not scalar replaceable.
1553 // Adjust escape state for outgoing arguments.
1554 const TypeTuple * d = call->tf()->domain_sig();
1555 bool src_has_oops = false;
1556 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1557 const Type* at = d->field_at(i);
1558 Node *arg = call->in(i);
1559 if (arg == nullptr) {
1560 continue;
1561 }
1562 const Type *aat = _igvn->type(arg);
1563 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
1564 continue;
1565 }
1566 if (arg->is_AddP()) {
1567 //
1568 // The inline_native_clone() case when the arraycopy stub is called
1569 // after the allocation before Initialize and CheckCastPP nodes.
1570 // Or the normal arraycopy for object arrays case.
1571 //
1572 // Set AddP's base (Allocate) as not scalar replaceable since
1573 // a pointer to the base (with offset) is passed as argument.
1574 //
1575 arg = get_addp_base(arg);
1576 }
1577 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1578 assert(arg_ptn != nullptr, "should be registered");
1579 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
1580 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
1581 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
1582 aat->isa_ptr() != nullptr, "expecting a Ptr");
1583 bool arg_has_oops = aat->isa_oopptr() &&
1584 (aat->isa_instptr() ||
1585 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
1586 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
1587 aat->isa_aryptr()->is_flat() &&
1588 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
1589 if (i == TypeFunc::Parms) {
1590 src_has_oops = arg_has_oops;
1591 }
1592 //
1593 // src or dst could be j.l.Object when the other is a basic type array:
1594 //
1595 //   arraycopy(char[],0,Object*,0,size);
1596 //   arraycopy(Object*,0,char[],0,size);
1597 //
1598 // Don't add edges in such cases.
1599 //
1600 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
1601 arg_has_oops && (i > TypeFunc::Parms);
1602 #ifdef ASSERT
1603 if (!(is_arraycopy ||
1604 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
1605 (call->as_CallLeaf()->_name != nullptr &&
1606 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
1607 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
1608 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
1609 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
1610 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
1611 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
1612 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
1613 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
1614 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
1615 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
1616 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
1617 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
1618 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
1619 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
1620 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
1621 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
1622 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
1623 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
1624 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
1625 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
1626 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
1627 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
1628 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
1629 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
1630 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
1631 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
1632 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
1633 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
1634 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
1635 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
1636 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
1637 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
1638 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
1639 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
1640 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
1641 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
1643 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
1644 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
1645 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
1646 ))) {
1647 call->dump();
1648 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
1649 }
1650 #endif
1651 // Always process arraycopy's destination object since
1652 // we need to add all possible edges to references in
1653 // the source object.
1654 if (arg_esc >= PointsToNode::ArgEscape &&
1655 !arg_is_arraycopy_dest) {
1656 continue;
1657 }
1658 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1659 if (call->is_ArrayCopy()) {
1660 ArrayCopyNode* ac = call->as_ArrayCopy();
1661 if (ac->is_clonebasic() ||
1662 ac->is_arraycopy_validated() ||
1663 ac->is_copyof_validated() ||
1664 ac->is_copyofrange_validated()) {
1665 es = PointsToNode::NoEscape;
1666 }
1667 }
1668 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1669 if (arg_is_arraycopy_dest) {
1670 Node* src = call->in(TypeFunc::Parms);
1671 if (src->is_AddP()) {
1672 src = get_addp_base(src);
1673 }
1674 PointsToNode* src_ptn = ptnode_adr(src->_idx);
1675 assert(src_ptn != nullptr, "should be registered");
1676 if (arg_ptn != src_ptn) {
1677 // Special arraycopy edge:
1678 // A destination object's field can't have the source object
1679 // as its base since the objects' escape states are not related.
1680 // Only the escape state of the destination object's fields affects
1681 // the escape state of fields in the source object.
1682 add_arraycopy(call, es, src_ptn, arg_ptn);
1683 }
1684 }
1685 }
1686 }
1687 break;
1688 }
1689 case Op_CallStaticJava: {
1690 // For a static call, we know exactly what method is being called.
1691 // Use the bytecode estimator to record the call's escape effects.
1692 #ifdef ASSERT
1693 const char* name = call->as_CallStaticJava()->_name;
1694 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1695 #endif
1696 ciMethod* meth = call->as_CallJava()->method();
1697 if ((meth != nullptr) && meth->is_boxing_method()) {
1698 break; // Boxing methods do not modify any oops.
1699 }
1700 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1701 // fall-through if not a Java method or no analyzer information
1702 if (call_analyzer != nullptr) {
1703 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1704 const TypeTuple* d = call->tf()->domain_cc();
1705 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1706 const Type* at = d->field_at(i);
1707 int k = i - TypeFunc::Parms;
1708 Node* arg = call->in(i);
1709 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1710 if (at->isa_ptr() != nullptr &&
1711 call_analyzer->is_arg_returned(k)) {
1712 // The call returns arguments.
1713 if (call_ptn != nullptr) { // Is call's result used?
1714 assert(call_ptn->is_LocalVar(), "node should be registered");
1715 assert(arg_ptn != nullptr, "node should be registered");
1716 add_edge(call_ptn, arg_ptn);
1717 }
1718 }
1719 if (at->isa_oopptr() != nullptr &&
1720 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1721 if (!call_analyzer->is_arg_stack(k)) {
1722 // The argument globally escapes.
1723 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1724 } else {
1725 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1726 if (!call_analyzer->is_arg_local(k)) {
1727 // The argument itself doesn't escape, but any fields might.
1728 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1729 }
1730 }
1731 }
1732 }
1733 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1734 // The call returns arguments.
1735 assert(call_ptn->edge_count() > 0, "sanity");
1736 if (!call_analyzer->is_return_local()) {
1737 // It may also return an unknown object.
1738 add_edge(call_ptn, phantom_obj);
1739 }
1740 }
1741 break;
1742 }
1743 }
1744 default: {
1745 // Fall through to here if this is not a Java method or there is
1746 // no analyzer information, or for some other type of call.
1747 // Assume the worst case: all arguments globally escape.
1748 const TypeTuple* d = call->tf()->domain_cc();
1749 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1750 const Type* at = d->field_at(i);
1751 if (at->isa_oopptr() != nullptr) {
1752 Node* arg = call->in(i);
1753 if (arg->is_AddP()) {
1754 arg = get_addp_base(arg);
1755 }
1756 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1757 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1758 }
1759 }
1760 }
1761 }
1762 }
1763
1764
1765 // Finish Graph construction.
1766 bool ConnectionGraph::complete_connection_graph(
1767 GrowableArray<PointsToNode*>& ptnodes_worklist,
1768 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1769 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1770 GrowableArray<FieldNode*>& oop_fields_worklist) {
1771 // Normally only 1-3 passes are needed to build the Connection Graph,
1772 // depending on graph complexity. We observed 8 passes in jvm2008 compiler.compiler.
1773 // Set the limit to 20 to catch the situation when something went wrong
1774 // and bail out of Escape Analysis.
1775 // Also limit the build time to 20 sec (60 in a debug VM) via the EscapeAnalysisTimeout flag.
1776 #define GRAPH_BUILD_ITER_LIMIT 20
1777
1778 // Propagate GlobalEscape and ArgEscape escape states and check that
1779 // we still have non-escaping objects. The method pushes Field nodes
1780 // which reference phantom_object onto _worklist.
1781 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1782 return false; // Nothing to do.
1783 }
1784 // Now propagate references to all JavaObject nodes.
1785 int java_objects_length = java_objects_worklist.length();
1786 elapsedTimer build_time;
1787 build_time.start();
1788 elapsedTimer time;
1789 bool timeout = false;
1790 int new_edges = 1;
1791 int iterations = 0;
1792 do {
1793 while ((new_edges > 0) &&
1794 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
1795 double start_time = time.seconds();
1796 time.start();
1797 new_edges = 0;
1798 // Propagate references to phantom_object for nodes pushed on _worklist
1799 // by find_non_escaped_objects() and find_field_value().
1800 new_edges += add_java_object_edges(phantom_obj, false);
1801 for (int next = 0; next < java_objects_length; ++next) {
1802 JavaObjectNode* ptn = java_objects_worklist.at(next);
1803 new_edges += add_java_object_edges(ptn, true);
1804
1805 #define SAMPLE_SIZE 4
1806 if ((next % SAMPLE_SIZE) == 0) {
1807 // Every SAMPLE_SIZE iterations, estimate how much time it will
1808 // take to complete graph construction.
1809 time.stop();
1810 // Poll for requests from the shutdown mechanism to quiesce the
1811 // compiler because Connection Graph construction may take a long time.
1812 CompileBroker::maybe_block();
1813 double stop_time = time.seconds();
1814 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1815 double time_until_end = time_per_iter * (double)(java_objects_length - next);
1816 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1817 timeout = true;
1818 break; // Timeout
1819 }
1820 start_time = stop_time;
1821 time.start();
1822 }
1823 #undef SAMPLE_SIZE
1824
1825 }
1826 if (timeout) break;
1827 if (new_edges > 0) {
1828 // Update escape states on each iteration if the graph was updated.
1829 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1830 return false; // Nothing to do.
1831 }
1832 }
1833 time.stop();
1834 if (time.seconds() >= EscapeAnalysisTimeout) {
1835 timeout = true;
1836 break;
1837 }
1838 }
1839 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
1840 time.start();
1841 // Find fields which have unknown value.
1842 int fields_length = oop_fields_worklist.length();
1843 for (int next = 0; next < fields_length; next++) {
1844 FieldNode* field = oop_fields_worklist.at(next);
1845 if (field->edge_count() == 0) {
1846 new_edges += find_field_value(field);
1847 // This call may have added new edges to phantom_object.
1848 // Another iteration is needed to propagate references to phantom_object.
1849 }
1850 }
1851 time.stop();
1852 if (time.seconds() >= EscapeAnalysisTimeout) {
1853 timeout = true;
1854 break;
1855 }
1856 } else {
1857 new_edges = 0; // Bailout
1858 }
1859 } while (new_edges > 0);
1860
1861 build_time.stop();
1862 _build_time = build_time.seconds();
1863 _build_iterations = iterations;
1864
1865 // Bail out if we passed the limits.
1866 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
1867 Compile* C = _compile;
1868 if (C->log() != nullptr) {
1869 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1870 C->log()->text("%s", timeout ? "time" : "iterations");
1871 C->log()->end_elem(" limit'");
1872 }
1873 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
1874 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
1875 // Possible infinite build_connection_graph loop,
1876 // bail out (no changes to the ideal graph were made).
1877 return false;
1878 }
1879
1880 #undef GRAPH_BUILD_ITER_LIMIT
1881
1882 // Find fields initialized by null for non-escaping Allocations.
1883 int non_escaped_length = non_escaped_allocs_worklist.length();
1884 for (int next = 0; next < non_escaped_length; next++) {
1885 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1886 PointsToNode::EscapeState es = ptn->escape_state();
1887 assert(es <= PointsToNode::ArgEscape, "sanity");
1888 if (es == PointsToNode::NoEscape) {
1889 if (find_init_values_null(ptn, _igvn) > 0) {
1890 // Adding references to the null object does not change escape states
1891 // since it does not escape. Also no fields are added to the null object.
1892 add_java_object_edges(null_obj, false);
1893 }
1894 }
1895 Node* n = ptn->ideal_node();
1896 if (n->is_Allocate()) {
1897 // The object allocated by this Allocate node will never be
1898 // seen by another thread. Mark it so that when it is
1899 // expanded no MemBarStoreStore is added.
1900 InitializeNode* ini = n->as_Allocate()->initialization();
1901 if (ini != nullptr)
1902 ini->set_does_not_escape();
1903 }
1904 }
1905 return true; // Finished graph construction.
1906 }
1907
1908 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1909 // and check that we still have non-escaping java objects.
1910 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1911 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
1912 GrowableArray<PointsToNode*> escape_worklist;
1913 // First, put all nodes with GlobalEscape and ArgEscape states on the worklist.
1914 int ptnodes_length = ptnodes_worklist.length(); 1915 for (int next = 0; next < ptnodes_length; ++next) { 1916 PointsToNode* ptn = ptnodes_worklist.at(next); 1917 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1918 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1919 escape_worklist.push(ptn); 1920 } 1921 } 1922 // Set escape states to referenced nodes (edges list). 1923 while (escape_worklist.length() > 0) { 1924 PointsToNode* ptn = escape_worklist.pop(); 1925 PointsToNode::EscapeState es = ptn->escape_state(); 1926 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1927 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1928 es >= PointsToNode::ArgEscape) { 1929 // GlobalEscape or ArgEscape state of field means it has unknown value. 1930 if (add_edge(ptn, phantom_obj)) { 1931 // New edge was added 1932 add_field_uses_to_worklist(ptn->as_Field()); 1933 } 1934 } 1935 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1936 PointsToNode* e = i.get(); 1937 if (e->is_Arraycopy()) { 1938 assert(ptn->arraycopy_dst(), "sanity"); 1939 // Propagate only fields escape state through arraycopy edge. 1940 if (e->fields_escape_state() < field_es) { 1941 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1942 escape_worklist.push(e); 1943 } 1944 } else if (es >= field_es) { 1945 // fields_escape_state is also set to 'es' if it is less than 'es'. 1946 if (e->escape_state() < es) { 1947 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1948 escape_worklist.push(e); 1949 } 1950 } else { 1951 // Propagate field escape state. 1952 bool es_changed = false; 1953 if (e->fields_escape_state() < field_es) { 1954 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1955 es_changed = true; 1956 } 1957 if ((e->escape_state() < field_es) && 1958 e->is_Field() && ptn->is_JavaObject() && 1959 e->as_Field()->is_oop()) { 1960 // Change escape state of referenced fields. 1961 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1962 es_changed = true; 1963 } else if (e->escape_state() < es) { 1964 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1965 es_changed = true; 1966 } 1967 if (es_changed) { 1968 escape_worklist.push(e); 1969 } 1970 } 1971 } 1972 } 1973 // Remove escaped objects from non_escaped list. 1974 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 1975 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 1976 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1977 non_escaped_allocs_worklist.delete_at(next); 1978 } 1979 if (ptn->escape_state() == PointsToNode::NoEscape) { 1980 // Find fields in non-escaped allocations which have unknown value. 1981 find_init_values_phantom(ptn); 1982 } 1983 } 1984 return (non_escaped_allocs_worklist.length() > 0); 1985 } 1986 1987 // Add all references to JavaObject node by walking over all uses. 1988 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1989 int new_edges = 0; 1990 if (populate_worklist) { 1991 // Populate _worklist by uses of jobj's uses. 1992 for (UseIterator i(jobj); i.has_next(); i.next()) { 1993 PointsToNode* use = i.get(); 1994 if (use->is_Arraycopy()) { 1995 continue; 1996 } 1997 add_uses_to_worklist(use); 1998 if (use->is_Field() && use->as_Field()->is_oop()) { 1999 // Put on worklist all field's uses (loads) and 2000 // related field nodes (same base and offset). 
2001 add_field_uses_to_worklist(use->as_Field()); 2002 } 2003 } 2004 } 2005 for (int l = 0; l < _worklist.length(); l++) { 2006 PointsToNode* use = _worklist.at(l); 2007 if (PointsToNode::is_base_use(use)) { 2008 // Add reference from jobj to field and from field to jobj (field's base). 2009 use = PointsToNode::get_use_node(use)->as_Field(); 2010 if (add_base(use->as_Field(), jobj)) { 2011 new_edges++; 2012 } 2013 continue; 2014 } 2015 assert(!use->is_JavaObject(), "sanity"); 2016 if (use->is_Arraycopy()) { 2017 if (jobj == null_obj) { // null object does not have field edges 2018 continue; 2019 } 2020 // Added edge from Arraycopy node to arraycopy's source java object 2021 if (add_edge(use, jobj)) { 2022 jobj->set_arraycopy_src(); 2023 new_edges++; 2024 } 2025 // and stop here. 2026 continue; 2027 } 2028 if (!add_edge(use, jobj)) { 2029 continue; // No new edge added, there was such edge already. 2030 } 2031 new_edges++; 2032 if (use->is_LocalVar()) { 2033 add_uses_to_worklist(use); 2034 if (use->arraycopy_dst()) { 2035 for (EdgeIterator i(use); i.has_next(); i.next()) { 2036 PointsToNode* e = i.get(); 2037 if (e->is_Arraycopy()) { 2038 if (jobj == null_obj) { // null object does not have field edges 2039 continue; 2040 } 2041 // Add edge from arraycopy's destination java object to Arraycopy node. 2042 if (add_edge(jobj, e)) { 2043 new_edges++; 2044 jobj->set_arraycopy_dst(); 2045 } 2046 } 2047 } 2048 } 2049 } else { 2050 // Added new edge to stored in field values. 2051 // Put on worklist all field's uses (loads) and 2052 // related field nodes (same base and offset). 2053 add_field_uses_to_worklist(use->as_Field()); 2054 } 2055 } 2056 _worklist.clear(); 2057 _in_worklist.reset(); 2058 return new_edges; 2059 } 2060 2061 // Put on worklist all related field nodes. 2062 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 2063 assert(field->is_oop(), "sanity"); 2064 int offset = field->offset(); 2065 add_uses_to_worklist(field); 2066 // Loop over all bases of this field and push on worklist Field nodes 2067 // with the same offset and base (since they may reference the same field). 2068 for (BaseIterator i(field); i.has_next(); i.next()) { 2069 PointsToNode* base = i.get(); 2070 add_fields_to_worklist(field, base); 2071 // Check if the base was source object of arraycopy and go over arraycopy's 2072 // destination objects since values stored to a field of source object are 2073 // accessible by uses (loads) of fields of destination objects. 2074 if (base->arraycopy_src()) { 2075 for (UseIterator j(base); j.has_next(); j.next()) { 2076 PointsToNode* arycp = j.get(); 2077 if (arycp->is_Arraycopy()) { 2078 for (UseIterator k(arycp); k.has_next(); k.next()) { 2079 PointsToNode* abase = k.get(); 2080 if (abase->arraycopy_dst() && abase != base) { 2081 // Look for the same arraycopy reference. 2082 add_fields_to_worklist(field, abase); 2083 } 2084 } 2085 } 2086 } 2087 } 2088 } 2089 } 2090 2091 // Put on worklist all related field nodes. 
2092 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 2093 int offset = field->offset(); 2094 if (base->is_LocalVar()) { 2095 for (UseIterator j(base); j.has_next(); j.next()) { 2096 PointsToNode* f = j.get(); 2097 if (PointsToNode::is_base_use(f)) { // Field 2098 f = PointsToNode::get_use_node(f); 2099 if (f == field || !f->as_Field()->is_oop()) { 2100 continue; 2101 } 2102 int offs = f->as_Field()->offset(); 2103 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 2104 add_to_worklist(f); 2105 } 2106 } 2107 } 2108 } else { 2109 assert(base->is_JavaObject(), "sanity"); 2110 if (// Skip phantom_object since it is only used to indicate that 2111 // this field's content globally escapes. 2112 (base != phantom_obj) && 2113 // null object node does not have fields. 2114 (base != null_obj)) { 2115 for (EdgeIterator i(base); i.has_next(); i.next()) { 2116 PointsToNode* f = i.get(); 2117 // Skip arraycopy edge since store to destination object field 2118 // does not update value in source object field. 2119 if (f->is_Arraycopy()) { 2120 assert(base->arraycopy_dst(), "sanity"); 2121 continue; 2122 } 2123 if (f == field || !f->as_Field()->is_oop()) { 2124 continue; 2125 } 2126 int offs = f->as_Field()->offset(); 2127 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 2128 add_to_worklist(f); 2129 } 2130 } 2131 } 2132 } 2133 } 2134 2135 // Find fields which have unknown value. 2136 int ConnectionGraph::find_field_value(FieldNode* field) { 2137 // Escaped fields should have init value already. 2138 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 2139 int new_edges = 0; 2140 for (BaseIterator i(field); i.has_next(); i.next()) { 2141 PointsToNode* base = i.get(); 2142 if (base->is_JavaObject()) { 2143 // Skip Allocate's fields which will be processed later. 2144 if (base->ideal_node()->is_Allocate()) { 2145 return 0; 2146 } 2147 assert(base == null_obj, "only null ptr base expected here"); 2148 } 2149 } 2150 if (add_edge(field, phantom_obj)) { 2151 // New edge was added 2152 new_edges++; 2153 add_field_uses_to_worklist(field); 2154 } 2155 return new_edges; 2156 } 2157 2158 // Find fields initializing values for allocations. 2159 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) { 2160 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 2161 PointsToNode* init_val = phantom_obj; 2162 Node* alloc = pta->ideal_node(); 2163 2164 // Do nothing for Allocate nodes since its fields values are 2165 // "known" unless they are initialized by arraycopy/clone. 2166 if (alloc->is_Allocate() && !pta->arraycopy_dst()) { 2167 if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) { 2168 // Non-flat inline type arrays are initialized with 2169 // the default value instead of null. Handle them here. 2170 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx); 2171 assert(init_val != nullptr, "default value should be registered"); 2172 } else { 2173 return 0; 2174 } 2175 } 2176 // Non-escaped allocation returned from Java or runtime call has unknown values in fields. 
2177 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2178 #ifdef ASSERT
2179 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2180 const char* name = alloc->as_CallStaticJava()->_name;
2181 assert(strncmp(name, "_multianewarray", 15) == 0 ||
2182 strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
2183 }
2184 #endif
2186 int new_edges = 0;
2187 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2188 PointsToNode* field = i.get();
2189 if (field->is_Field() && field->as_Field()->is_oop()) {
2190 if (add_edge(field, init_val)) {
2191 // New edge was added
2192 new_edges++;
2193 add_field_uses_to_worklist(field->as_Field());
2194 }
2195 }
2196 }
2197 return new_edges;
2198 }
2199
2200 // Find fields initializing values for allocations.
2201 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2202 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2203 Node* alloc = pta->ideal_node();
2204 // Do nothing for Call nodes since their field values are unknown.
2205 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2206 return 0;
2207 }
2208 InitializeNode* ini = alloc->as_Allocate()->initialization();
2209 bool visited_bottom_offset = false;
2210 GrowableArray<int> offsets_worklist;
2211 int new_edges = 0;
2212
2213 // Check if an oop field's initializing value is recorded and add
2214 // a corresponding null value if it is not recorded.
2215 // The Connection Graph does not record a default initialization by null
2216 // captured by an Initialize node.
2217 //
2218 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2219 PointsToNode* field = i.get(); // Field (AddP)
2220 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2221 continue; // Not oop field
2222 }
2223 int offset = field->as_Field()->offset();
2224 if (offset == Type::OffsetBot) {
2225 if (!visited_bottom_offset) {
2226 // OffsetBot is used to reference an array's element.
2227 // Always add a reference to null to all Field nodes since we don't
2228 // know which element is referenced.
2229 if (add_edge(field, null_obj)) {
2230 // New edge was added
2231 new_edges++;
2232 add_field_uses_to_worklist(field->as_Field());
2233 visited_bottom_offset = true;
2234 }
2235 }
2236 } else {
2237 // Check only oop fields.
2238 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2239 if (adr_type->isa_rawptr()) {
2240 #ifdef ASSERT
2241 // Raw pointers are used for initializing stores, so skip this field
2242 // since its value should be recorded already.
2243 Node* base = get_addp_base(field->ideal_node());
2244 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2245 #endif
2246 continue;
2247 }
2248 if (!offsets_worklist.contains(offset)) {
2249 offsets_worklist.append(offset);
2250 Node* value = nullptr;
2251 if (ini != nullptr) {
2252 // StoreP::memory_type() == T_ADDRESS
2253 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2254 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2255 // Make sure the initializing store has the same type as this AddP.
2256 // This AddP may reference a non-existing field because it is on a
2257 // dead branch of a bimorphic call which is not eliminated yet.
2258 if (store != nullptr && store->is_Store() && 2259 store->as_Store()->memory_type() == ft) { 2260 value = store->in(MemNode::ValueIn); 2261 #ifdef ASSERT 2262 if (VerifyConnectionGraph) { 2263 // Verify that AddP already points to all objects the value points to. 2264 PointsToNode* val = ptnode_adr(value->_idx); 2265 assert((val != nullptr), "should be processed already"); 2266 PointsToNode* missed_obj = nullptr; 2267 if (val->is_JavaObject()) { 2268 if (!field->points_to(val->as_JavaObject())) { 2269 missed_obj = val; 2270 } 2271 } else { 2272 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 2273 tty->print_cr("----------init store has invalid value -----"); 2274 store->dump(); 2275 val->dump(); 2276 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 2277 } 2278 for (EdgeIterator j(val); j.has_next(); j.next()) { 2279 PointsToNode* obj = j.get(); 2280 if (obj->is_JavaObject()) { 2281 if (!field->points_to(obj->as_JavaObject())) { 2282 missed_obj = obj; 2283 break; 2284 } 2285 } 2286 } 2287 } 2288 if (missed_obj != nullptr) { 2289 tty->print_cr("----------field---------------------------------"); 2290 field->dump(); 2291 tty->print_cr("----------missed reference to object------------"); 2292 missed_obj->dump(); 2293 tty->print_cr("----------object referenced by init store-------"); 2294 store->dump(); 2295 val->dump(); 2296 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 2297 } 2298 } 2299 #endif 2300 } else { 2301 // There could be initializing stores which follow allocation. 2302 // For example, a volatile field store is not collected 2303 // by Initialize node. 2304 // 2305 // Need to check for dependent loads to separate such stores from 2306 // stores which follow loads. For now, add initial value null so 2307 // that compare pointers optimization works correctly. 2308 } 2309 } 2310 if (value == nullptr) { 2311 // A field's initializing value was not recorded. Add null. 2312 if (add_edge(field, null_obj)) { 2313 // New edge was added 2314 new_edges++; 2315 add_field_uses_to_worklist(field->as_Field()); 2316 } 2317 } 2318 } 2319 } 2320 } 2321 return new_edges; 2322 } 2323 2324 // Adjust scalar_replaceable state after Connection Graph is built. 2325 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) { 2326 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)' 2327 // returns true. If one of the constraints in this method set 'jobj' to NSR 2328 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as 2329 // input, 'adjust_scalar_replaceable_state' will eventually be called with 2330 // that other object and the Phi will become a reducible Phi. 2331 // There could be multiple merges involving the same jobj. 2332 Unique_Node_List candidates; 2333 2334 // Search for non-escaping objects which are not scalar replaceable 2335 // and mark them to propagate the state to referenced objects. 2336 2337 for (UseIterator i(jobj); i.has_next(); i.next()) { 2338 PointsToNode* use = i.get(); 2339 if (use->is_Arraycopy()) { 2340 continue; 2341 } 2342 if (use->is_Field()) { 2343 FieldNode* field = use->as_Field(); 2344 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 2345 // 1. An object is not scalar replaceable if the field into which it is 2346 // stored has unknown offset (stored into unknown element of an array). 
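// Illustrative sketch (assumed Java shape, not from the original comments):
//
//    Point[] a = ...;
//    a[i] = p;   // 'i' is not a compile-time constant
//
// The store target is an unknown array element, so the Field node has
// offset OffsetBot and 'p' cannot be scalar replaced.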
2347 if (field->offset() == Type::OffsetBot) {
2348 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2349 return;
2350 }
2351 for (BaseIterator i(field); i.has_next(); i.next()) {
2352 PointsToNode* base = i.get();
2353 // 2. An object is not scalar replaceable if the field into which it is
2354 // stored has multiple bases, one of which is null.
2355 if ((base == null_obj) && (field->base_count() > 1)) {
2356 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2357 return;
2358 }
2359 // 2.5. An object is not scalar replaceable if the field into which it is
2360 // stored has an NSR base.
2361 if (!base->scalar_replaceable()) {
2362 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2363 return;
2364 }
2365 }
2366 }
2367 assert(use->is_Field() || use->is_LocalVar(), "sanity");
2368 // 3. An object is not scalar replaceable if it is merged with other objects
2369 // and we can't remove the merge.
2370 for (EdgeIterator j(use); j.has_next(); j.next()) {
2371 PointsToNode* ptn = j.get();
2372 if (ptn->is_JavaObject() && ptn != jobj) {
2373 Node* use_n = use->ideal_node();
2374
2375 // If it's already a candidate or a confirmed reducible merge we can skip verification.
2376 if (candidates.member(use_n)) {
2377 continue;
2378 } else if (reducible_merges.member(use_n)) {
2379 candidates.push(use_n);
2380 continue;
2381 }
2382
2383 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2384 candidates.push(use_n);
2385 } else {
2386 // Mark all objects as NSR if we can't remove the merge.
2387 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2388 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2389 }
2390 }
2391 }
2392 if (!jobj->scalar_replaceable()) {
2393 return;
2394 }
2395 }
2396
2397 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2398 if (j.get()->is_Arraycopy()) {
2399 continue;
2400 }
2401
2402 // A non-escaping object node should point only to field nodes.
2403 FieldNode* field = j.get()->as_Field();
2404 int offset = field->as_Field()->offset();
2405
2406 // 4. An object is not scalar replaceable if it has a field with unknown
2407 // offset (an array's element is accessed in a loop).
2408 if (offset == Type::OffsetBot) {
2409 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2410 return;
2411 }
2412 // 5. Currently an object is not scalar replaceable if a LoadStore node
2413 // accesses its field since the field value is unknown after it.
2414 //
2415 Node* n = field->ideal_node();
2416
2417 // Test for an unsafe access that was parsed as maybe off heap
2418 // (with a CheckCastPP to raw memory).
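// Illustrative sketch (assumed usage, not from the original comments):
//
//    UNSAFE.putInt(o, offset, v);   // 'o' may be an oop or a raw address
//
// The parser can't prove the base is on-heap, so it inserts a CheckCastPP
// to raw memory; that pattern is matched below.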
2419 assert(n->is_AddP(), "expect an address computation");
2420 if (n->in(AddPNode::Base)->is_top() &&
2421 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2422 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2423 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2424 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2425 return;
2426 }
2427
2428 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2429 Node* u = n->fast_out(i);
2430 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2431 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2432 return;
2433 }
2434 }
2435
2436 // 6. Or the address may point to more than one object. This may produce
2437 // a false positive result (set not scalar replaceable)
2438 // since the flow-insensitive escape analysis can't separate
2439 // the case when stores overwrite the field's value from the case
2440 // when stores happen on different control branches.
2441 //
2442 // Note: it will disable scalar replacement in some cases:
2443 //
2444 // Point p[] = new Point[1];
2445 // p[0] = new Point(); // Will not be scalar replaced
2446 //
2447 // but it will save us from incorrect optimizations in cases like the following:
2448 //
2449 // Point p[] = new Point[1];
2450 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
2451 //
2452 if (field->base_count() > 1 && candidates.size() == 0) {
2453 for (BaseIterator i(field); i.has_next(); i.next()) {
2454 PointsToNode* base = i.get();
2455 // Don't take into account LocalVar nodes which
2456 // may point to only one object which should also be
2457 // this field's base by now.
2458 if (base->is_JavaObject() && base != jobj) {
2459 // Mark all bases.
2460 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
2461 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
2462 }
2463 }
2464
2465 if (!jobj->scalar_replaceable()) {
2466 return;
2467 }
2468 }
2469 }
2470
2471 // The candidate is truly a reducible merge only if none of the other
2472 // constraints ruled it as NSR. There could be multiple merges involving the
2473 // same jobj.
2474 assert(jobj->scalar_replaceable(), "sanity");
2475 for (uint i = 0; i < candidates.size(); i++ ) {
2476 Node* candidate = candidates.at(i);
2477 reducible_merges.push(candidate);
2478 }
2479 }
2480
2481 // Propagate NSR (Not scalar replaceable) state.
2482 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
2483 int jobj_length = jobj_worklist.length();
2484 bool found_nsr_alloc = true;
2485 while (found_nsr_alloc) {
2486 found_nsr_alloc = false;
2487 for (int next = 0; next < jobj_length; ++next) {
2488 JavaObjectNode* jobj = jobj_worklist.at(next);
2489 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
2490 PointsToNode* use = i.get();
2491 if (use->is_Field()) {
2492 FieldNode* field = use->as_Field();
2493 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2494 assert(field->offset() != Type::OffsetBot, "sanity");
2495 for (BaseIterator i(field); i.has_next(); i.next()) {
2496 PointsToNode* base = i.get();
2497 // An object is not scalar replaceable if the field into which
2498 // it is stored has an NSR base.
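// Illustrative sketch (assumed Java shape, not from the original comments):
//
//    A a = ...;      // 'a' was already marked NSR
//    a.f = b;        // 'b' is stored into a field with an NSR base
//
// Scalar replacing 'b' would require rewriting loads through 'a.f',
// so 'b' must be marked NSR as well.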
2499 if ((base != null_obj) && !base->scalar_replaceable()) { 2500 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 2501 found_nsr_alloc = true; 2502 break; 2503 } 2504 } 2505 } 2506 } 2507 } 2508 } 2509 } 2510 2511 #ifdef ASSERT 2512 void ConnectionGraph::verify_connection_graph( 2513 GrowableArray<PointsToNode*>& ptnodes_worklist, 2514 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 2515 GrowableArray<JavaObjectNode*>& java_objects_worklist, 2516 GrowableArray<Node*>& addp_worklist) { 2517 // Verify that graph is complete - no new edges could be added. 2518 int java_objects_length = java_objects_worklist.length(); 2519 int non_escaped_length = non_escaped_allocs_worklist.length(); 2520 int new_edges = 0; 2521 for (int next = 0; next < java_objects_length; ++next) { 2522 JavaObjectNode* ptn = java_objects_worklist.at(next); 2523 new_edges += add_java_object_edges(ptn, true); 2524 } 2525 assert(new_edges == 0, "graph was not complete"); 2526 // Verify that escape state is final. 2527 int length = non_escaped_allocs_worklist.length(); 2528 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 2529 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 2530 (non_escaped_length == length) && 2531 (_worklist.length() == 0), "escape state was not final"); 2532 2533 // Verify fields information. 2534 int addp_length = addp_worklist.length(); 2535 for (int next = 0; next < addp_length; ++next ) { 2536 Node* n = addp_worklist.at(next); 2537 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 2538 if (field->is_oop()) { 2539 // Verify that field has all bases 2540 Node* base = get_addp_base(n); 2541 PointsToNode* ptn = ptnode_adr(base->_idx); 2542 if (ptn->is_JavaObject()) { 2543 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 2544 } else { 2545 assert(ptn->is_LocalVar(), "sanity"); 2546 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2547 PointsToNode* e = i.get(); 2548 if (e->is_JavaObject()) { 2549 assert(field->has_base(e->as_JavaObject()), "sanity"); 2550 } 2551 } 2552 } 2553 // Verify that all fields have initializing values. 2554 if (field->edge_count() == 0) { 2555 tty->print_cr("----------field does not have references----------"); 2556 field->dump(); 2557 for (BaseIterator i(field); i.has_next(); i.next()) { 2558 PointsToNode* base = i.get(); 2559 tty->print_cr("----------field has next base---------------------"); 2560 base->dump(); 2561 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 2562 tty->print_cr("----------base has fields-------------------------"); 2563 for (EdgeIterator j(base); j.has_next(); j.next()) { 2564 j.get()->dump(); 2565 } 2566 tty->print_cr("----------base has references---------------------"); 2567 for (UseIterator j(base); j.has_next(); j.next()) { 2568 j.get()->dump(); 2569 } 2570 } 2571 } 2572 for (UseIterator i(field); i.has_next(); i.next()) { 2573 i.get()->dump(); 2574 } 2575 assert(field->edge_count() > 0, "sanity"); 2576 } 2577 } 2578 } 2579 } 2580 #endif 2581 2582 // Optimize ideal graph. 2583 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 2584 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 2585 Compile* C = _compile; 2586 PhaseIterGVN* igvn = _igvn; 2587 if (EliminateLocks) { 2588 // Mark locks before changing ideal graph. 
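// Illustrative sketch (assumed Java shape, not from the original comments):
//
//    synchronized (new Object()) { ... }   // monitor object never escapes
//
// A lock on a non-escaping object is thread-local, so the corresponding
// Lock/Unlock macro nodes can be marked for elimination.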
2589 int cnt = C->macro_count(); 2590 for (int i = 0; i < cnt; i++) { 2591 Node *n = C->macro_node(i); 2592 if (n->is_AbstractLock()) { // Lock and Unlock nodes 2593 AbstractLockNode* alock = n->as_AbstractLock(); 2594 if (!alock->is_non_esc_obj()) { 2595 const Type* obj_type = igvn->type(alock->obj_node()); 2596 if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) { 2597 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 2598 // The lock could be marked eliminated by lock coarsening 2599 // code during first IGVN before EA. Replace coarsened flag 2600 // to eliminate all associated locks/unlocks. 2601 #ifdef ASSERT 2602 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 2603 #endif 2604 alock->set_non_esc_obj(); 2605 } 2606 } 2607 } 2608 } 2609 } 2610 2611 if (OptimizePtrCompare) { 2612 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 2613 Node *n = ptr_cmp_worklist.at(i); 2614 const TypeInt* tcmp = optimize_ptr_compare(n); 2615 if (tcmp->singleton()) { 2616 Node* cmp = igvn->makecon(tcmp); 2617 #ifndef PRODUCT 2618 if (PrintOptimizePtrCompare) { 2619 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 2620 if (Verbose) { 2621 n->dump(1); 2622 } 2623 } 2624 #endif 2625 igvn->replace_node(n, cmp); 2626 } 2627 } 2628 } 2629 2630 // For MemBarStoreStore nodes added in library_call.cpp, check 2631 // escape status of associated AllocateNode and optimize out 2632 // MemBarStoreStore node if the allocated object never escapes. 2633 for (int i = 0; i < storestore_worklist.length(); i++) { 2634 Node* storestore = storestore_worklist.at(i); 2635 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 2636 if (alloc->is_Allocate() && not_global_escape(alloc)) { 2637 if (alloc->in(AllocateNode::InlineType) != nullptr) { 2638 // Non-escaping inline type buffer allocations don't require a membar 2639 storestore->as_MemBar()->remove(_igvn); 2640 } else { 2641 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 2642 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 2643 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 2644 igvn->register_new_node_with_optimizer(mb); 2645 igvn->replace_node(storestore, mb); 2646 } 2647 } 2648 } 2649 } 2650 2651 // Optimize objects compare. 2652 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { 2653 assert(OptimizePtrCompare, "sanity"); 2654 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 2655 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO 2656 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE 2657 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1] 2658 2659 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2660 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2661 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2662 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2663 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2664 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2665 2666 // Check simple cases first. 2667 if (jobj1 != nullptr) { 2668 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2669 if (jobj1 == jobj2) { 2670 // Comparing the same not escaping object. 2671 return EQ; 2672 } 2673 Node* obj = jobj1->ideal_node(); 2674 // Comparing not escaping allocation. 
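// Illustrative sketch (assumed Java shape, not from the original comments):
//
//    A a = new A();        // non-escaping allocation
//    if (a == x) { ... }   // folds to "not equal" unless 'x' may point to
//                          // the same allocation (covers a != null too)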
2675 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2676 !ptn2->points_to(jobj1)) { 2677 return NE; // This includes nullness check. 2678 } 2679 } 2680 } 2681 if (jobj2 != nullptr) { 2682 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2683 Node* obj = jobj2->ideal_node(); 2684 // Comparing not escaping allocation. 2685 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2686 !ptn1->points_to(jobj2)) { 2687 return NE; // This includes nullness check. 2688 } 2689 } 2690 } 2691 if (jobj1 != nullptr && jobj1 != phantom_obj && 2692 jobj2 != nullptr && jobj2 != phantom_obj && 2693 jobj1->ideal_node()->is_Con() && 2694 jobj2->ideal_node()->is_Con()) { 2695 // Klass or String constants compare. Need to be careful with 2696 // compressed pointers - compare types of ConN and ConP instead of nodes. 2697 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2698 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2699 if (t1->make_ptr() == t2->make_ptr()) { 2700 return EQ; 2701 } else { 2702 return NE; 2703 } 2704 } 2705 if (ptn1->meet(ptn2)) { 2706 return UNKNOWN; // Sets are not disjoint 2707 } 2708 2709 // Sets are disjoint. 2710 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2711 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2712 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2713 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2714 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2715 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2716 // Check nullness of unknown object. 2717 return UNKNOWN; 2718 } 2719 2720 // Disjointness by itself is not sufficient since 2721 // alias analysis is not complete for escaped objects. 2722 // Disjoint sets are definitely unrelated only when 2723 // at least one set has only not escaping allocations. 2724 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2725 if (ptn1->non_escaping_allocation()) { 2726 return NE; 2727 } 2728 } 2729 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2730 if (ptn2->non_escaping_allocation()) { 2731 return NE; 2732 } 2733 } 2734 return UNKNOWN; 2735 } 2736 2737 // Connection Graph construction functions. 
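
// The helpers below create the PointsTo node kinds used by the graph:
// LocalVar, JavaObject, Field and Arraycopy. Each ideal node is mapped
// to its PointsTo node via map_ideal_node().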
2738 2739 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2740 PointsToNode* ptadr = _nodes.at(n->_idx); 2741 if (ptadr != nullptr) { 2742 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2743 return; 2744 } 2745 Compile* C = _compile; 2746 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2747 map_ideal_node(n, ptadr); 2748 } 2749 2750 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2751 PointsToNode* ptadr = _nodes.at(n->_idx); 2752 if (ptadr != nullptr) { 2753 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2754 return ptadr; 2755 } 2756 Compile* C = _compile; 2757 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2758 map_ideal_node(n, ptadr); 2759 return ptadr; 2760 } 2761 2762 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2763 PointsToNode* ptadr = _nodes.at(n->_idx); 2764 if (ptadr != nullptr) { 2765 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2766 return; 2767 } 2768 bool unsafe = false; 2769 bool is_oop = is_oop_field(n, offset, &unsafe); 2770 if (unsafe) { 2771 es = PointsToNode::GlobalEscape; 2772 } 2773 Compile* C = _compile; 2774 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2775 map_ideal_node(n, field); 2776 } 2777 2778 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2779 PointsToNode* src, PointsToNode* dst) { 2780 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2781 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 2782 PointsToNode* ptadr = _nodes.at(n->_idx); 2783 if (ptadr != nullptr) { 2784 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2785 return; 2786 } 2787 Compile* C = _compile; 2788 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2789 map_ideal_node(n, ptadr); 2790 // Add edge from arraycopy node to source object. 2791 (void)add_edge(ptadr, src); 2792 src->set_arraycopy_src(); 2793 // Add edge from destination object to arraycopy node. 2794 (void)add_edge(dst, ptadr); 2795 dst->set_arraycopy_dst(); 2796 } 2797 2798 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2799 const Type* adr_type = n->as_AddP()->bottom_type(); 2800 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot; 2801 BasicType bt = T_INT; 2802 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) { 2803 // Check only oop fields. 2804 if (!adr_type->isa_aryptr() || 2805 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 2806 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 2807 // OffsetBot is used to reference array's element. Ignore first AddP. 
2808 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 2809 bt = T_OBJECT; 2810 } 2811 } 2812 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2813 if (adr_type->isa_instptr()) { 2814 ciField* field = _compile->alias_type(adr_type->is_ptr())->field(); 2815 if (field != nullptr) { 2816 bt = field->layout_type(); 2817 } else { 2818 // Check for unsafe oop field access 2819 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2820 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2821 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2822 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2823 bt = T_OBJECT; 2824 (*unsafe) = true; 2825 } 2826 } 2827 } else if (adr_type->isa_aryptr()) { 2828 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2829 // Ignore array length load. 2830 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 2831 // Ignore first AddP. 2832 } else { 2833 const Type* elemtype = adr_type->is_aryptr()->elem(); 2834 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) { 2835 ciInlineKlass* vk = elemtype->inline_klass(); 2836 field_offset += vk->first_field_offset(); 2837 bt = vk->get_field_by_offset(field_offset, false)->layout_type(); 2838 } else { 2839 bt = elemtype->array_element_basic_type(); 2840 } 2841 } 2842 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2843 // Allocation initialization, ThreadLocal field access, unsafe access 2844 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2845 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2846 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2847 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2848 bt = T_OBJECT; 2849 } 2850 } 2851 } 2852 // Note: T_NARROWOOP is not classed as a real reference type 2853 return (is_reference_type(bt) || bt == T_NARROWOOP); 2854 } 2855 2856 // Returns unique pointed java object or null. 2857 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 2858 // If the node was created after the escape computation we can't answer. 2859 uint idx = n->_idx; 2860 if (idx >= nodes_size()) { 2861 return nullptr; 2862 } 2863 PointsToNode* ptn = ptnode_adr(idx); 2864 if (ptn == nullptr) { 2865 return nullptr; 2866 } 2867 if (ptn->is_JavaObject()) { 2868 return ptn->as_JavaObject(); 2869 } 2870 assert(ptn->is_LocalVar(), "sanity"); 2871 // Check all java objects it points to. 2872 JavaObjectNode* jobj = nullptr; 2873 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2874 PointsToNode* e = i.get(); 2875 if (e->is_JavaObject()) { 2876 if (jobj == nullptr) { 2877 jobj = e->as_JavaObject(); 2878 } else if (jobj != e) { 2879 return nullptr; 2880 } 2881 } 2882 } 2883 return jobj; 2884 } 2885 2886 // Return true if this node points only to non-escaping allocations. 2887 bool PointsToNode::non_escaping_allocation() { 2888 if (is_JavaObject()) { 2889 Node* n = ideal_node(); 2890 if (n->is_Allocate() || n->is_CallStaticJava()) { 2891 return (escape_state() == PointsToNode::NoEscape); 2892 } else { 2893 return false; 2894 } 2895 } 2896 assert(is_LocalVar(), "sanity"); 2897 // Check all java objects it points to. 
2898 for (EdgeIterator i(this); i.has_next(); i.next()) {
2899 PointsToNode* e = i.get();
2900 if (e->is_JavaObject()) {
2901 Node* n = e->ideal_node();
2902 if ((e->escape_state() != PointsToNode::NoEscape) ||
2903 !(n->is_Allocate() || n->is_CallStaticJava())) {
2904 return false;
2905 }
2906 }
2907 }
2908 return true;
2909 }
2910
2911 // Return true if we know the node does not escape globally.
2912 bool ConnectionGraph::not_global_escape(Node *n) {
2913 assert(!_collecting, "should not call during graph construction");
2914 // If the node was created after the escape computation we can't answer.
2915 uint idx = n->_idx;
2916 if (idx >= nodes_size()) {
2917 return false;
2918 }
2919 PointsToNode* ptn = ptnode_adr(idx);
2920 if (ptn == nullptr) {
2921 return false; // not in congraph (e.g. ConI)
2922 }
2923 PointsToNode::EscapeState es = ptn->escape_state();
2924 // If we have already computed a value, return it.
2925 if (es >= PointsToNode::GlobalEscape) {
2926 return false;
2927 }
2928 if (ptn->is_JavaObject()) {
2929 return true; // (es < PointsToNode::GlobalEscape);
2930 }
2931 assert(ptn->is_LocalVar(), "sanity");
2932 // Check all java objects it points to.
2933 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2934 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2935 return false;
2936 }
2937 }
2938 return true;
2939 }
2940
2941
2942 // Helper functions
2943
2944 // Return true if this node is the specified node or points to it directly.
2945 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2946 if (is_JavaObject()) {
2947 return (this == ptn);
2948 }
2949 assert(is_LocalVar() || is_Field(), "sanity");
2950 for (EdgeIterator i(this); i.has_next(); i.next()) {
2951 if (i.get() == ptn) {
2952 return true;
2953 }
2954 }
2955 return false;
2956 }
2957
2958 // Return true if the points-to sets of the two nodes intersect.
2959 bool PointsToNode::meet(PointsToNode* ptn) {
2960 if (this == ptn) {
2961 return true;
2962 } else if (ptn->is_JavaObject()) {
2963 return this->points_to(ptn->as_JavaObject());
2964 } else if (this->is_JavaObject()) {
2965 return ptn->points_to(this->as_JavaObject());
2966 }
2967 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2968 int ptn_count = ptn->edge_count();
2969 for (EdgeIterator i(this); i.has_next(); i.next()) {
2970 PointsToNode* this_e = i.get();
2971 for (int j = 0; j < ptn_count; j++) {
2972 if (this_e == ptn->edge(j)) {
2973 return true;
2974 }
2975 }
2976 }
2977 return false;
2978 }
2979
2980 #ifdef ASSERT
2981 // Return true if bases point to this java object.
2982 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2983 for (BaseIterator i(this); i.has_next(); i.next()) {
2984 if (i.get() == jobj) {
2985 return true;
2986 }
2987 }
2988 return false;
2989 }
2990 #endif
2991
2992 bool ConnectionGraph::is_captured_store_address(Node* addp) {
2993 // Handle the simple case first.
2994 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 2995 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 2996 return true; 2997 } else if (addp->in(AddPNode::Address)->is_Phi()) { 2998 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 2999 Node* addp_use = addp->fast_out(i); 3000 if (addp_use->is_Store()) { 3001 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 3002 if (addp_use->fast_out(j)->is_Initialize()) { 3003 return true; 3004 } 3005 } 3006 } 3007 } 3008 } 3009 return false; 3010 } 3011 3012 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 3013 const Type *adr_type = phase->type(adr); 3014 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 3015 // We are computing a raw address for a store captured by an Initialize 3016 // compute an appropriate address type. AddP cases #3 and #5 (see below). 3017 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 3018 assert(offs != Type::OffsetBot || 3019 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 3020 "offset must be a constant or it is initialization of array"); 3021 return offs; 3022 } 3023 return adr_type->is_ptr()->flat_offset(); 3024 } 3025 3026 Node* ConnectionGraph::get_addp_base(Node *addp) { 3027 assert(addp->is_AddP(), "must be AddP"); 3028 // 3029 // AddP cases for Base and Address inputs: 3030 // case #1. Direct object's field reference: 3031 // Allocate 3032 // | 3033 // Proj #5 ( oop result ) 3034 // | 3035 // CheckCastPP (cast to instance type) 3036 // | | 3037 // AddP ( base == address ) 3038 // 3039 // case #2. Indirect object's field reference: 3040 // Phi 3041 // | 3042 // CastPP (cast to instance type) 3043 // | | 3044 // AddP ( base == address ) 3045 // 3046 // case #3. Raw object's field reference for Initialize node: 3047 // Allocate 3048 // | 3049 // Proj #5 ( oop result ) 3050 // top | 3051 // \ | 3052 // AddP ( base == top ) 3053 // 3054 // case #4. Array's element reference: 3055 // {CheckCastPP | CastPP} 3056 // | | | 3057 // | AddP ( array's element offset ) 3058 // | | 3059 // AddP ( array's offset ) 3060 // 3061 // case #5. Raw object's field reference for arraycopy stub call: 3062 // The inline_native_clone() case when the arraycopy stub is called 3063 // after the allocation before Initialize and CheckCastPP nodes. 3064 // Allocate 3065 // | 3066 // Proj #5 ( oop result ) 3067 // | | 3068 // AddP ( base == address ) 3069 // 3070 // case #6. Constant Pool, ThreadLocal, CastX2P or 3071 // Raw object's field reference: 3072 // {ConP, ThreadLocal, CastX2P, raw Load} 3073 // top | 3074 // \ | 3075 // AddP ( base == top ) 3076 // 3077 // case #7. Klass's field reference. 3078 // LoadKlass 3079 // | | 3080 // AddP ( base == address ) 3081 // 3082 // case #8. narrow Klass's field reference. 3083 // LoadNKlass 3084 // | 3085 // DecodeN 3086 // | | 3087 // AddP ( base == address ) 3088 // 3089 // case #9. Mixed unsafe access 3090 // {instance} 3091 // | 3092 // CheckCastPP (raw) 3093 // top | 3094 // \ | 3095 // AddP ( base == top ) 3096 // 3097 Node *base = addp->in(AddPNode::Base); 3098 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 3099 base = addp->in(AddPNode::Address); 3100 while (base->is_AddP()) { 3101 // Case #6 (unsafe access) may have several chained AddP nodes. 
3102       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3103       base = base->in(AddPNode::Address);
3104     }
3105     if (base->Opcode() == Op_CheckCastPP &&
3106         base->bottom_type()->isa_rawptr() &&
3107         _igvn->type(base->in(1))->isa_oopptr()) {
3108       base = base->in(1); // Case #9
3109     } else {
3110       Node* uncast_base = base->uncast();
3111       int opcode = uncast_base->Opcode();
3112       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3113              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3114              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3115              is_captured_store_address(addp), "sanity");
3116     }
3117   }
3118   return base;
3119 }
3120
3121 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3122   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3123   Node* addp2 = addp->raw_out(0);
3124   if (addp->outcnt() == 1 && addp2->is_AddP() &&
3125       addp2->in(AddPNode::Base) == n &&
3126       addp2->in(AddPNode::Address) == addp) {
3127     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3128     //
3129     // Find the array's offset AddP to push it on the worklist first, so
3130     // that, as a result, the array's element offset AddP (pushed second)
3131     // is processed first; this avoids inserting a CastPP for the array's offset.
3132     // Otherwise the inserted CastPP (LocalVar) would point to what
3133     // the AddP (Field) points to, which would be wrong since
3134     // the algorithm expects the CastPP to point to the same node as
3135     // the AddP's base CheckCastPP (LocalVar).
3136     //
3137     //        ArrayAllocation
3138     //          |
3139     //        CheckCastPP
3140     //          |
3141     //        memProj (from ArrayAllocation CheckCastPP)
3142     //          |  ||
3143     //          |  ||   Int (element index)
3144     //          |  ||    |   ConI (log(element size))
3145     //          |  ||    |   /
3146     //          |  ||   LShift
3147     //          |  ||  /
3148     //          |  AddP (array's element offset)
3149     //          |  |
3150     //          |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
3151     //          | / /
3152     //        AddP (array's offset)
3153     //          |
3154     //        Load/Store (memory operation on array's element)
3155     //
3156     return addp2;
3157   }
3158   return nullptr;
3159 }
3160
3161 //
3162 // Adjust the type and inputs of an AddP which computes the
3163 // address of a field of an instance.
3164 //
3165 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3166   PhaseGVN* igvn = _igvn;
3167   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3168   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3169   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3170   if (t == nullptr) {
3171     // We are computing a raw address for a store captured by an Initialize;
3172     // compute an appropriate address type (cases #3 and #5).
3173     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3174     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3175     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3176     assert(offs != Type::OffsetBot, "offset must be a constant");
3177     if (base_t->isa_aryptr() != nullptr) {
3178       // In the case of a flat inline type array, each field has its
3179       // own slice so we need to extract the field being accessed from
3180       // the address computation.
3181       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3182     } else {
3183       t = base_t->add_offset(offs)->is_oopptr();
3184     }
3185   }
3186   int inst_id = base_t->instance_id();
3187   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3188          "old type must be non-instance or match new type");
3189
3190   // The type 't' could be a subclass of 'base_t'.
3191   // As a result t->offset() could be larger than base_t's size, which would
3192   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3193   // constructor verifies the correctness of the offset.
3194   //
3195   // It could happen on a subclass's branch (from type profiling
3196   // inlining) which was not eliminated during parsing since the exactness
3197   // of the allocation type was not propagated to the subclass type check.
3198   //
3199   // Or the type 't' might not be related to 'base_t' at all.
3200   // It could happen when the CHA type is different from the MDO type on a dead path
3201   // (for example, from an instanceof check) which is not collapsed during parsing.
3202   //
3203   // Do nothing for such an AddP node and don't process its users since
3204   // this code branch will go away.
3205   //
3206   if (!t->is_known_instance() &&
3207       !base_t->maybe_java_subtype_of(t)) {
3208     return false; // bail out
3209   }
3210   const TypePtr* tinst = base_t->add_offset(t->offset());
3211   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3212     // In the case of a flat inline type array, each field has its
3213     // own slice so we need to keep track of the field being accessed.
3214     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3215     // Keep array properties (not flat/null-free)
3216     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3217     if (tinst == nullptr) {
3218       return false; // Skip dead path with inconsistent properties
3219     }
3220   }
3221
3222   // Do NOT remove the next line: ensure a new alias index is allocated
3223   // for the instance type. Note: C++ will not remove it since the call
3224   // has a side effect.
3225   int alias_idx = _compile->get_alias_index(tinst);
3226   igvn->set_type(addp, tinst);
3227   // record the allocation in the node map
3228   set_map(addp, get_map(base->_idx));
3229   // Set addp's Base and Address to 'base'.
3230   Node *abase = addp->in(AddPNode::Base);
3231   Node *adr = addp->in(AddPNode::Address);
3232   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3233       adr->in(0)->_idx == (uint)inst_id) {
3234     // Skip AddP cases #3 and #5.
3235 } else { 3236 assert(!abase->is_top(), "sanity"); // AddP case #3 3237 if (abase != base) { 3238 igvn->hash_delete(addp); 3239 addp->set_req(AddPNode::Base, base); 3240 if (abase == adr) { 3241 addp->set_req(AddPNode::Address, base); 3242 } else { 3243 // AddP case #4 (adr is array's element offset AddP node) 3244 #ifdef ASSERT 3245 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 3246 assert(adr->is_AddP() && atype != nullptr && 3247 atype->instance_id() == inst_id, "array's element offset should be processed first"); 3248 #endif 3249 } 3250 igvn->hash_insert(addp); 3251 } 3252 } 3253 // Put on IGVN worklist since at least addp's type was changed above. 3254 record_for_optimizer(addp); 3255 return true; 3256 } 3257 3258 // 3259 // Create a new version of orig_phi if necessary. Returns either the newly 3260 // created phi or an existing phi. Sets create_new to indicate whether a new 3261 // phi was created. Cache the last newly created phi in the node map. 3262 // 3263 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 3264 Compile *C = _compile; 3265 PhaseGVN* igvn = _igvn; 3266 new_created = false; 3267 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 3268 // nothing to do if orig_phi is bottom memory or matches alias_idx 3269 if (phi_alias_idx == alias_idx) { 3270 return orig_phi; 3271 } 3272 // Have we recently created a Phi for this alias index? 3273 PhiNode *result = get_map_phi(orig_phi->_idx); 3274 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) { 3275 return result; 3276 } 3277 // Previous check may fail when the same wide memory Phi was split into Phis 3278 // for different memory slices. Search all Phis for this region. 3279 if (result != nullptr) { 3280 Node* region = orig_phi->in(0); 3281 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 3282 Node* phi = region->fast_out(i); 3283 if (phi->is_Phi() && 3284 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 3285 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 3286 return phi->as_Phi(); 3287 } 3288 } 3289 } 3290 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 3291 if (C->do_escape_analysis() == true && !C->failing()) { 3292 // Retry compilation without escape analysis. 3293 // If this is the first failure, the sentinel string will "stick" 3294 // to the Compile object, and the C2Compiler will see it and retry. 3295 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3296 } 3297 return nullptr; 3298 } 3299 orig_phi_worklist.append_if_missing(orig_phi); 3300 const TypePtr *atype = C->get_adr_type(alias_idx); 3301 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype); 3302 C->copy_node_notes_to(result, orig_phi); 3303 igvn->set_type(result, result->bottom_type()); 3304 record_for_optimizer(result); 3305 set_map(orig_phi, result); 3306 new_created = true; 3307 return result; 3308 } 3309 3310 // 3311 // Return a new version of Memory Phi "orig_phi" with the inputs having the 3312 // specified alias index. 
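// Added note: nested memory Phis are handled iteratively below with the
// explicit phi_list/cur_input stacks rather than by recursion, so deeply
// nested Phi chains cannot overflow the native stack.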
3313 //
3314 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
3315   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3316   Compile *C = _compile;
3317   PhaseGVN* igvn = _igvn;
3318   bool new_phi_created;
3319   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3320   if (!new_phi_created) {
3321     return result;
3322   }
3323   GrowableArray<PhiNode *> phi_list;
3324   GrowableArray<uint> cur_input;
3325   PhiNode *phi = orig_phi;
3326   uint idx = 1;
3327   bool finished = false;
3328   while (!finished) {
3329     while (idx < phi->req()) {
3330       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
3331       if (mem != nullptr && mem->is_Phi()) {
3332         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3333         if (new_phi_created) {
3334           // Found a phi for which we created a new split; push the current
3335           // one on the worklist and begin processing the new one.
3336           phi_list.push(phi);
3337           cur_input.push(idx);
3338           phi = mem->as_Phi();
3339           result = newphi;
3340           idx = 1;
3341           continue;
3342         } else {
3343           mem = newphi;
3344         }
3345       }
3346       if (C->failing()) {
3347         return nullptr;
3348       }
3349       result->set_req(idx++, mem);
3350     }
3351 #ifdef ASSERT
3352     // verify that the new Phi has an input for each input of the original
3353     assert(phi->req() == result->req(), "must have same number of inputs.");
3354     assert(result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3355 #endif
3356     // Check that the new phi's inputs correspond (null for null) to the
3357     // inputs of the original phi.
3358     for (uint i = 1; i < phi->req(); i++) {
3359       Node* in = result->in(i);
3360       assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
3361     }
3362     // we have finished processing a Phi, see if there are any more to do
3363     finished = (phi_list.length() == 0);
3364     if (!finished) {
3365       phi = phi_list.pop();
3366       idx = cur_input.pop();
3367       PhiNode *prev_result = get_map_phi(phi->_idx);
3368       prev_result->set_req(idx++, result);
3369       result = prev_result;
3370     }
3371   }
3372   return result;
3373 }
3374
3375 //
3376 // The next methods are derived from methods in MemNode.
3377 //
3378 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
3379   Node *mem = mmem;
3380   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
3381   // means an array I have not precisely typed yet. Do not do any
3382   // alias stuff with it any time soon.
3383   if (toop->base() != Type::AnyPtr &&
3384       !(toop->isa_instptr() &&
3385         toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
3386         toop->offset() == Type::OffsetBot)) {
3387     mem = mmem->memory_at(alias_idx);
3388     // Update input if it is progress over what we have now.
3389   }
3390   return mem;
3391 }
3392
3393 //
3394 // Move memory users to their memory slices.
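// Added note: only two kinds of users are rewired here - MergeMems which
// still reference this node on the general slice, and MemBars hanging off
// it; memory nodes and Phis are expected to be handled already and are
// only checked under ASSERT below.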
3395 // 3396 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 3397 Compile* C = _compile; 3398 PhaseGVN* igvn = _igvn; 3399 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 3400 assert(tp != nullptr, "ptr type"); 3401 int alias_idx = C->get_alias_index(tp); 3402 int general_idx = C->get_general_index(alias_idx); 3403 3404 // Move users first 3405 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3406 Node* use = n->fast_out(i); 3407 if (use->is_MergeMem()) { 3408 MergeMemNode* mmem = use->as_MergeMem(); 3409 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 3410 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 3411 continue; // Nothing to do 3412 } 3413 // Replace previous general reference to mem node. 3414 uint orig_uniq = C->unique(); 3415 Node* m = find_inst_mem(n, general_idx, orig_phis); 3416 assert(orig_uniq == C->unique(), "no new nodes"); 3417 mmem->set_memory_at(general_idx, m); 3418 --imax; 3419 --i; 3420 } else if (use->is_MemBar()) { 3421 assert(!use->is_Initialize(), "initializing stores should not be moved"); 3422 if (use->req() > MemBarNode::Precedent && 3423 use->in(MemBarNode::Precedent) == n) { 3424 // Don't move related membars. 3425 record_for_optimizer(use); 3426 continue; 3427 } 3428 tp = use->as_MemBar()->adr_type()->isa_ptr(); 3429 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 3430 alias_idx == general_idx) { 3431 continue; // Nothing to do 3432 } 3433 // Move to general memory slice. 3434 uint orig_uniq = C->unique(); 3435 Node* m = find_inst_mem(n, general_idx, orig_phis); 3436 assert(orig_uniq == C->unique(), "no new nodes"); 3437 igvn->hash_delete(use); 3438 imax -= use->replace_edge(n, m, igvn); 3439 igvn->hash_insert(use); 3440 record_for_optimizer(use); 3441 --i; 3442 #ifdef ASSERT 3443 } else if (use->is_Mem()) { 3444 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 3445 // Don't move related cardmark. 3446 continue; 3447 } 3448 // Memory nodes should have new memory input. 3449 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 3450 assert(tp != nullptr, "ptr type"); 3451 int idx = C->get_alias_index(tp); 3452 assert(get_map(use->_idx) != nullptr || idx == alias_idx, 3453 "Following memory nodes should have new memory input or be on the same memory slice"); 3454 } else if (use->is_Phi()) { 3455 // Phi nodes should be split and moved already. 3456 tp = use->as_Phi()->adr_type()->isa_ptr(); 3457 assert(tp != nullptr, "ptr type"); 3458 int idx = C->get_alias_index(tp); 3459 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 3460 } else { 3461 use->dump(); 3462 assert(false, "should not be here"); 3463 #endif 3464 } 3465 } 3466 } 3467 3468 // 3469 // Search memory chain of "mem" to find a MemNode whose address 3470 // is the specified alias index. 
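// Hedged sketch of the walk below (illustrative, simplified): starting at
// orig_mem, the chain is climbed while nodes provably unrelated to this
// slice are stepped over, e.g.
//   result = StoreP on another slice                  -> skipped
//   result = Proj of a call that cannot modify toop   -> skipped
//   result = memory node on alias_idx, start memory,
//            or this instance's own Initialize        -> stop, return it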
3471 //
3472 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
3473   if (orig_mem == nullptr) {
3474     return orig_mem;
3475   }
3476   Compile* C = _compile;
3477   PhaseGVN* igvn = _igvn;
3478   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
3479   bool is_instance = (toop != nullptr) && toop->is_known_instance();
3480   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
3481   Node *prev = nullptr;
3482   Node *result = orig_mem;
3483   while (prev != result) {
3484     prev = result;
3485     if (result == start_mem) {
3486       break; // hit one of our sentinels
3487     }
3488     if (result->is_Mem()) {
3489       const Type *at = igvn->type(result->in(MemNode::Address));
3490       if (at == Type::TOP) {
3491         break; // Dead
3492       }
3493       assert(at->isa_ptr() != nullptr, "pointer type required.");
3494       int idx = C->get_alias_index(at->is_ptr());
3495       if (idx == alias_idx) {
3496         break; // Found
3497       }
3498       if (!is_instance && (at->isa_oopptr() == nullptr ||
3499                            !at->is_oopptr()->is_known_instance())) {
3500         break; // Do not skip store to general memory slice.
3501       }
3502       result = result->in(MemNode::Memory);
3503     }
3504     if (!is_instance) {
3505       continue; // don't search further for non-instance types
3506     }
3507     // Skip over a call which does not affect this memory slice.
3508     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
3509       Node *proj_in = result->in(0);
3510       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
3511         break; // hit one of our sentinels
3512       } else if (proj_in->is_Call()) {
3513         // ArrayCopy node processed here as well
3514         CallNode *call = proj_in->as_Call();
3515         if (!call->may_modify(toop, igvn)) {
3516           result = call->in(TypeFunc::Memory);
3517         }
3518       } else if (proj_in->is_Initialize()) {
3519         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
3520         // Stop if this is the initialization for the object instance which
3521         // contains this memory slice, otherwise skip over it.
3522         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
3523           result = proj_in->in(TypeFunc::Memory);
3524         }
3525       } else if (proj_in->is_MemBar()) {
3526         // Check if there is an array copy for a clone.
3527         // Step over the GC barrier when ReduceInitialCardMarks is disabled.
3528         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3529         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
3530
3531         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
3532           // Stop if it is a clone.
3533           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
3534           if (ac->may_modify(toop, igvn)) {
3535             break;
3536           }
3537         }
3538         result = proj_in->in(TypeFunc::Memory);
3539       }
3540     } else if (result->is_MergeMem()) {
3541       MergeMemNode *mmem = result->as_MergeMem();
3542       result = step_through_mergemem(mmem, alias_idx, toop);
3543       if (result == mmem->base_memory()) {
3544         // Didn't find instance memory, search through general slice recursively.
3545         result = mmem->memory_at(C->get_general_index(alias_idx));
3546         result = find_inst_mem(result, alias_idx, orig_phis);
3547         if (C->failing()) {
3548           return nullptr;
3549         }
3550         mmem->set_memory_at(alias_idx, result);
3551       }
3552     } else if (result->is_Phi() &&
3553                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
3554       Node *un = result->as_Phi()->unique_input(igvn);
3555       if (un != nullptr) {
3556         orig_phis.append_if_missing(result->as_Phi());
3557         result = un;
3558       } else {
3559         break;
3560       }
3561     } else if (result->is_ClearArray()) {
3562       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
3563         // Cannot bypass the initialization of the instance
3564         // we are looking for.
3565         break;
3566       }
3567       // Otherwise skip it (the call updated 'result' value).
3568     } else if (result->Opcode() == Op_SCMemProj) {
3569       Node* mem = result->in(0);
3570       Node* adr = nullptr;
3571       if (mem->is_LoadStore()) {
3572         adr = mem->in(MemNode::Address);
3573       } else {
3574         assert(mem->Opcode() == Op_EncodeISOArray ||
3575                mem->Opcode() == Op_StrCompressedCopy, "sanity");
3576         adr = mem->in(3); // Memory edge corresponds to destination array
3577       }
3578       const Type *at = igvn->type(adr);
3579       if (at != Type::TOP) {
3580         assert(at->isa_ptr() != nullptr, "pointer type required.");
3581         int idx = C->get_alias_index(at->is_ptr());
3582         if (idx == alias_idx) {
3583           // Assert in debug mode
3584           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
3585           break; // In product mode return SCMemProj node
3586         }
3587       }
3588       result = mem->in(MemNode::Memory);
3589     } else if (result->Opcode() == Op_StrInflatedCopy) {
3590       Node* adr = result->in(3); // Memory edge corresponds to destination array
3591       const Type *at = igvn->type(adr);
3592       if (at != Type::TOP) {
3593         assert(at->isa_ptr() != nullptr, "pointer type required.");
3594         int idx = C->get_alias_index(at->is_ptr());
3595         if (idx == alias_idx) {
3596           // Assert in debug mode
3597           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
3598           break; // In product mode return SCMemProj node
3599         }
3600       }
3601       result = result->in(MemNode::Memory);
3602     }
3603   }
3604   if (result->is_Phi()) {
3605     PhiNode *mphi = result->as_Phi();
3606     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
3607     const TypePtr *t = mphi->adr_type();
3608     if (!is_instance) {
3609       // Push all non-instance Phis on the orig_phis worklist to update inputs
3610       // during Phase 4 if needed.
3611       orig_phis.append_if_missing(mphi);
3612     } else if (C->get_alias_index(t) != alias_idx) {
3613       // Create a new Phi with the specified alias index type.
3614       result = split_memory_phi(mphi, alias_idx, orig_phis);
3615     }
3616   }
3617   // The result is either a MemNode, a PhiNode or an InitializeNode.
3618   return result;
3619 }
3620
3621 //
3622 // Convert the types of non-escaped objects to instance types where possible,
3623 // propagate the new type information through the graph, and update memory
3624 // edges and MergeMem inputs to reflect the new type.
3625 //
3626 // We start with allocations (and calls which may be allocations) on alloc_worklist.
3627 // The processing is done in 4 phases:
3628 //
3629 //  Phase 1:  Process possible allocations from alloc_worklist. Create instance
3630 //            types for the CheckCastPP for allocations where possible.
3631 //            Propagate the new types through users as follows:
3632 //               casts and Phi:  push users on alloc_worklist
3633 //               AddP:  cast Base and Address inputs to the instance type
3634 //                      push any AddP users on alloc_worklist and push any memnode
3635 //                      users onto memnode_worklist.
3636 //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address type and
3637 //            search the Memory chain for a store with the appropriate
3638 //            address type. If a Phi is found, create a new version with
3639 //            the appropriate memory slices from each of the Phi inputs.
3640 //            For stores, process the users as follows:
3641 //               MemNode:  push on memnode_worklist
3642 //               MergeMem: push on mergemem_worklist
3643 //  Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
3644 //            moving the first node encountered of each instance type to the
3645 //            input corresponding to its alias index.
3647 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3648 //
3649 // In the following example, the CheckCastPP nodes are the cast of allocation
3650 // results and the allocation of node 29 is non-escaped and eligible to be an
3651 // instance type.
3652 //
3653 // We start with:
3654 //
3655 //     7 Parm #memory
3656 //    10 ConI "12"
3657 //    19 CheckCastPP "Foo"
3658 //    20 AddP _ 19 19 10   Foo+12  alias_index=4
3659 //    29 CheckCastPP "Foo"
3660 //    30 AddP _ 29 29 10   Foo+12  alias_index=4
3661 //
3662 //    40 StoreP 25  7 20   ... alias_index=4
3663 //    50 StoreP 35 40 30   ... alias_index=4
3664 //    60 StoreP 45 50 20   ... alias_index=4
3665 //    70 LoadP  _  60 30   ... alias_index=4
3666 //    80 Phi 75 50 60      Memory alias_index=4
3667 //    90 LoadP  _  80 30   ... alias_index=4
3668 //   100 LoadP  _  80 20   ... alias_index=4
3669 //
3670 //
3671 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
3672 // and creating a new alias index for node 30. This gives:
3673 //
3674 //     7 Parm #memory
3675 //    10 ConI "12"
3676 //    19 CheckCastPP "Foo"
3677 //    20 AddP _ 19 19 10   Foo+12  alias_index=4
3678 //    29 CheckCastPP "Foo" iid=24
3679 //    30 AddP _ 29 29 10   Foo+12  alias_index=6  iid=24
3680 //
3681 //    40 StoreP 25  7 20   ... alias_index=4
3682 //    50 StoreP 35 40 30   ... alias_index=6
3683 //    60 StoreP 45 50 20   ... alias_index=4
3684 //    70 LoadP  _  60 30   ... alias_index=6
3685 //    80 Phi 75 50 60      Memory alias_index=4
3686 //    90 LoadP  _  80 30   ... alias_index=6
3687 //   100 LoadP  _  80 20   ... alias_index=4
3688 //
3689 // In phase 2, new memory inputs are computed for the loads and stores,
3690 // and a new version of the phi is created. In phase 4, the inputs to
3691 // node 80 are updated and then the memory nodes are updated with the
3692 // values computed in phase 2. This results in:
3693 //
3694 //     7 Parm #memory
3695 //    10 ConI "12"
3696 //    19 CheckCastPP "Foo"
3697 //    20 AddP _ 19 19 10   Foo+12  alias_index=4
3698 //    29 CheckCastPP "Foo" iid=24
3699 //    30 AddP _ 29 29 10   Foo+12  alias_index=6  iid=24
3700 //
3701 //    40 StoreP 25  7 20   ... alias_index=4
3702 //    50 StoreP 35  7 30   ... alias_index=6
3703 //    60 StoreP 45 40 20   ... alias_index=4
3704 //    70 LoadP  _  50 30   ... alias_index=6
3705 //    80 Phi 75 40 60      Memory alias_index=4
3706 //   120 Phi 75 50 50      Memory alias_index=6
3707 //    90 LoadP  _ 120 30   ... alias_index=6
3708 //   100 LoadP  _  80 20   ... alias_index=4
3709 //
3710 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
3711                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
3712                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
3713                                          Unique_Node_List &reducible_merges) {
3714   DEBUG_ONLY(Unique_Node_List reduced_merges;)
3715   GrowableArray<Node *> memnode_worklist;
3716   GrowableArray<PhiNode *> orig_phis;
3717   PhaseIterGVN *igvn = _igvn;
3718   uint new_index_start = (uint) _compile->num_alias_types();
3719   VectorSet visited;
3720   ideal_nodes.clear(); // Reset for use with set_map/get_map.
3721   uint unique_old = _compile->unique();
3722
3723   // Phase 1: Process possible allocations from alloc_worklist.
3724   // Create instance types for the CheckCastPP for allocations where possible.
3725   //
3726   // (Note: don't forget to change the order of the second AddP node on
3727   // the alloc_worklist if the order of the worklist processing is changed,
3728   // see the comment in find_second_addp().)
3729   //
3730   while (alloc_worklist.length() != 0) {
3731     Node *n = alloc_worklist.pop();
3732     uint ni = n->_idx;
3733     if (n->is_Call()) {
3734       CallNode *alloc = n->as_Call();
3735       // copy escape information to call node
3736       PointsToNode* ptn = ptnode_adr(alloc->_idx);
3737       PointsToNode::EscapeState es = ptn->escape_state();
3738       // We have an allocation or call which returns a Java object,
3739       // see if it is non-escaped.
3740       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
3741         continue;
3742       }
3743       // Find CheckCastPP for the allocate or for the return value of a call
3744       n = alloc->result_cast();
3745       if (n == nullptr) { // No uses except Initialize node
3746         if (alloc->is_Allocate()) {
3747           // Set the scalar_replaceable flag for allocation
3748           // so it could be eliminated if it has no uses.
3749           alloc->as_Allocate()->_is_scalar_replaceable = true;
3750         }
3751         continue;
3752       }
3753       if (!n->is_CheckCastPP()) { // not a unique CheckCastPP.
3754         // We could reach here for the allocate case if one init is associated with many allocs.
3755         if (alloc->is_Allocate()) {
3756           alloc->as_Allocate()->_is_scalar_replaceable = false;
3757         }
3758         continue;
3759       }
3760
3761       // The inline code for Object.clone() casts the allocation result to
3762       // java.lang.Object and then to the actual type of the allocated
3763       // object. Detect this case and use the second cast.
3764       // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
3765       // the allocation result is cast to java.lang.Object and then
3766       // to the actual Array type.
3767       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3768           && (alloc->is_AllocateArray() ||
3769               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
3770         Node *cast2 = nullptr;
3771         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3772           Node *use = n->fast_out(i);
3773           if (use->is_CheckCastPP()) {
3774             cast2 = use;
3775             break;
3776           }
3777         }
3778         if (cast2 != nullptr) {
3779           n = cast2;
3780         } else {
3781           // Non-scalar replaceable if the allocation type is unknown statically
3782           // (reflection allocation); the object can't be restored during
3783           // deoptimization without a precise type.
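// Hypothetical source pattern for this branch (illustrative only):
//   Object a = java.lang.reflect.Array.newInstance(elemClass, len);
// with no later cast to the concrete array type; only java.lang.Object
// is known statically, so the precise type needed to rematerialize the
// object at a deoptimization point is unavailable.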
3784           continue;
3785         }
3786       }
3787
3788       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3789       if (t == nullptr) {
3790         continue; // not a TypeOopPtr
3791       }
3792       if (!t->klass_is_exact()) {
3793         continue; // not a unique type
3794       }
3795       if (alloc->is_Allocate()) {
3796         // Set the scalar_replaceable flag for allocation
3797         // so it could be eliminated.
3798         alloc->as_Allocate()->_is_scalar_replaceable = true;
3799       }
3800       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
3801       // In order for an object to be scalar-replaceable, it must be:
3802       //   - a direct allocation (not a call returning an object)
3803       //   - non-escaping
3804       //   - eligible to be a unique type
3805       //   - not determined to be ineligible by escape analysis
3806       set_map(alloc, n);
3807       set_map(n, alloc);
3808       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3809       igvn->hash_delete(n);
3810       igvn->set_type(n, tinst);
3811       n->raise_bottom_type(tinst);
3812       igvn->hash_insert(n);
3813       record_for_optimizer(n);
3814       // Allocate an alias index for the header fields. Accesses to
3815       // the header emitted during macro expansion wouldn't have
3816       // a correct memory state otherwise.
3817       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3818       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3819       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3820
3821         // First, put on the worklist all Field edges from the Connection Graph,
3822         // which is more accurate than pushing immediate users from the ideal graph.
3823         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3824           PointsToNode* tgt = e.get();
3825           if (tgt->is_Arraycopy()) {
3826             continue;
3827           }
3828           Node* use = tgt->ideal_node();
3829           assert(tgt->is_Field() && use->is_AddP(),
3830                  "only AddP nodes are Field edges in CG");
3831           if (use->outcnt() > 0) { // Don't process dead nodes
3832             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3833             if (addp2 != nullptr) {
3834               assert(alloc->is_AllocateArray(), "array allocation was expected");
3835               alloc_worklist.append_if_missing(addp2);
3836             }
3837             alloc_worklist.append_if_missing(use);
3838           }
3839         }
3840
3841         // An allocation may have an Initialize which has raw stores. Scan
3842         // the users of the raw allocation result and push AddP users
3843         // on alloc_worklist.
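// Sketch of the assumed shape scanned below (illustrative):
//   Allocate --> Proj (raw result) --> AddP --> StoreX
// where the store was captured by the allocation's Initialize before a
// CheckCastPP was emitted.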
3844         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3845         assert(raw_result != nullptr, "must have an allocation result");
3846         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3847           Node *use = raw_result->fast_out(i);
3848           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3849             Node* addp2 = find_second_addp(use, raw_result);
3850             if (addp2 != nullptr) {
3851               assert(alloc->is_AllocateArray(), "array allocation was expected");
3852               alloc_worklist.append_if_missing(addp2);
3853             }
3854             alloc_worklist.append_if_missing(use);
3855           } else if (use->is_MemBar()) {
3856             memnode_worklist.append_if_missing(use);
3857           }
3858         }
3859       }
3860     } else if (n->is_AddP()) {
3861       Node* addp_base = get_addp_base(n);
3862       if (addp_base != nullptr && reducible_merges.member(addp_base)) {
3863         // This AddP will go away when we reduce the Phi.
3864         continue;
3865       }
3866       JavaObjectNode* jobj = unique_java_object(addp_base);
3867       if (jobj == nullptr || jobj == phantom_obj) {
3868 #ifdef ASSERT
3869         ptnode_adr(get_addp_base(n)->_idx)->dump();
3870         ptnode_adr(n->_idx)->dump();
3871         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3872 #endif
3873         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3874         return;
3875       }
3876       Node *base = get_map(jobj->idx()); // CheckCastPP node
3877       if (!split_AddP(n, base)) continue; // wrong type from dead path
3878     } else if (n->is_Phi() ||
3879                n->is_CheckCastPP() ||
3880                n->is_EncodeP() ||
3881                n->is_DecodeN() ||
3882                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3883       if (visited.test_set(n->_idx)) {
3884         assert(n->is_Phi(), "loops only through Phi's");
3885         continue; // already processed
3886       }
3887       // Reducible Phi's will be removed from the graph after split_unique_types finishes.
3888       if (reducible_merges.member(n)) {
3889         // Split loads through phi
3890         reduce_phi_on_field_access(n->as_Phi(), alloc_worklist);
3891 #ifdef ASSERT
3892         if (VerifyReduceAllocationMerges) {
3893           reduced_merges.push(n);
3894         }
3895 #endif
3896         continue;
3897       }
3898       JavaObjectNode* jobj = unique_java_object(n);
3899       if (jobj == nullptr || jobj == phantom_obj) {
3900 #ifdef ASSERT
3901         ptnode_adr(n->_idx)->dump();
3902         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3903 #endif
3904         _compile->record_failure(_invocation > 0 ?
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3905 return; 3906 } else { 3907 Node *val = get_map(jobj->idx()); // CheckCastPP node 3908 TypeNode *tn = n->as_Type(); 3909 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3910 assert(tinst != nullptr && tinst->is_known_instance() && 3911 tinst->instance_id() == jobj->idx() , "instance type expected."); 3912 3913 const Type *tn_type = igvn->type(tn); 3914 const TypeOopPtr *tn_t; 3915 if (tn_type->isa_narrowoop()) { 3916 tn_t = tn_type->make_ptr()->isa_oopptr(); 3917 } else { 3918 tn_t = tn_type->isa_oopptr(); 3919 } 3920 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 3921 if (tn_t->isa_aryptr()) { 3922 // Keep array properties (not flat/null-free) 3923 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr()); 3924 if (tinst == nullptr) { 3925 continue; // Skip dead path with inconsistent properties 3926 } 3927 } 3928 if (tn_type->isa_narrowoop()) { 3929 tn_type = tinst->make_narrowoop(); 3930 } else { 3931 tn_type = tinst; 3932 } 3933 igvn->hash_delete(tn); 3934 igvn->set_type(tn, tn_type); 3935 tn->set_type(tn_type); 3936 igvn->hash_insert(tn); 3937 record_for_optimizer(n); 3938 } else { 3939 assert(tn_type == TypePtr::NULL_PTR || 3940 tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t), 3941 "unexpected type"); 3942 continue; // Skip dead path with different type 3943 } 3944 } 3945 } else { 3946 debug_only(n->dump();) 3947 assert(false, "EA: unexpected node"); 3948 continue; 3949 } 3950 // push allocation's users on appropriate worklist 3951 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3952 Node *use = n->fast_out(i); 3953 if (use->is_Mem() && use->in(MemNode::Address) == n) { 3954 // Load/store to instance's field 3955 memnode_worklist.append_if_missing(use); 3956 } else if (use->is_MemBar()) { 3957 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3958 memnode_worklist.append_if_missing(use); 3959 } 3960 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3961 Node* addp2 = find_second_addp(use, n); 3962 if (addp2 != nullptr) { 3963 alloc_worklist.append_if_missing(addp2); 3964 } 3965 alloc_worklist.append_if_missing(use); 3966 } else if (use->is_Phi() || 3967 use->is_CheckCastPP() || 3968 use->is_EncodeNarrowPtr() || 3969 use->is_DecodeNarrowPtr() || 3970 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3971 alloc_worklist.append_if_missing(use); 3972 #ifdef ASSERT 3973 } else if (use->is_Mem()) { 3974 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 3975 } else if (use->is_MergeMem()) { 3976 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3977 } else if (use->is_SafePoint()) { 3978 // Look for MergeMem nodes for calls which reference unique allocation 3979 // (through CheckCastPP nodes) even for debug info. 
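// E.g. (illustrative) a call which only lists the CheckCastPP in its debug
// info still carries a MergeMem on its memory input; that MergeMem must
// already be queued on mergemem_worklist (asserted below).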
3980 Node* m = use->in(TypeFunc::Memory); 3981 if (m->is_MergeMem()) { 3982 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3983 } 3984 } else if (use->Opcode() == Op_EncodeISOArray) { 3985 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 3986 // EncodeISOArray overwrites destination array 3987 memnode_worklist.append_if_missing(use); 3988 } 3989 } else if (use->Opcode() == Op_Return) { 3990 // Allocation is referenced by field of returned inline type 3991 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode"); 3992 } else { 3993 uint op = use->Opcode(); 3994 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) && 3995 (use->in(MemNode::Memory) == n)) { 3996 // They overwrite memory edge corresponding to destination array, 3997 memnode_worklist.append_if_missing(use); 3998 } else if (!(op == Op_CmpP || op == Op_Conv2B || 3999 op == Op_CastP2X || op == Op_StoreCM || 4000 op == Op_FastLock || op == Op_AryEq || 4001 op == Op_StrComp || op == Op_CountPositives || 4002 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || 4003 op == Op_StrEquals || op == Op_VectorizedHashCode || 4004 op == Op_StrIndexOf || op == Op_StrIndexOfChar || 4005 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck || 4006 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { 4007 n->dump(); 4008 use->dump(); 4009 assert(false, "EA: missing allocation reference path"); 4010 } 4011 #endif 4012 } 4013 } 4014 4015 } 4016 4017 #ifdef ASSERT 4018 if (VerifyReduceAllocationMerges) { 4019 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints. 4020 for (uint i = 0; i < reducible_merges.size(); i++) { 4021 Node* phi = reducible_merges.at(i); 4022 4023 if (!reduced_merges.member(phi)) { 4024 phi->dump(2); 4025 phi->dump(-2); 4026 assert(false, "This reducible merge wasn't reduced."); 4027 } 4028 4029 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) { 4030 Node* use = phi->fast_out(j); 4031 if (!use->is_SafePoint()) { 4032 phi->dump(2); 4033 phi->dump(-2); 4034 assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt()); 4035 } 4036 } 4037 } 4038 } 4039 #endif 4040 4041 // Go over all ArrayCopy nodes and if one of the inputs has a unique 4042 // type, record it in the ArrayCopy node so we know what memory this 4043 // node uses/modified. 4044 for (int next = 0; next < arraycopy_worklist.length(); next++) { 4045 ArrayCopyNode* ac = arraycopy_worklist.at(next); 4046 Node* dest = ac->in(ArrayCopyNode::Dest); 4047 if (dest->is_AddP()) { 4048 dest = get_addp_base(dest); 4049 } 4050 JavaObjectNode* jobj = unique_java_object(dest); 4051 if (jobj != nullptr) { 4052 Node *base = get_map(jobj->idx()); 4053 if (base != nullptr) { 4054 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 4055 ac->_dest_type = base_t; 4056 } 4057 } 4058 Node* src = ac->in(ArrayCopyNode::Src); 4059 if (src->is_AddP()) { 4060 src = get_addp_base(src); 4061 } 4062 jobj = unique_java_object(src); 4063 if (jobj != nullptr) { 4064 Node* base = get_map(jobj->idx()); 4065 if (base != nullptr) { 4066 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 4067 ac->_src_type = base_t; 4068 } 4069 } 4070 } 4071 4072 // New alias types were created in split_AddP(). 4073 uint new_index_end = (uint) _compile->num_alias_types(); 4074 4075 // Phase 2: Process MemNode's from memnode_worklist. 
//          Compute the new address type and
4076 //          compute new values for Memory inputs (the Memory inputs are not
4077 //          actually updated until Phase 4).
4078   if (memnode_worklist.length() == 0)
4079     return; // nothing to do
4080   while (memnode_worklist.length() != 0) {
4081     Node *n = memnode_worklist.pop();
4082     if (visited.test_set(n->_idx)) {
4083       continue;
4084     }
4085     if (n->is_Phi() || n->is_ClearArray()) {
4086       // we don't need to do anything, but the users must be pushed
4087     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4088       // we don't need to do anything, but the users must be pushed
4089       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4090       if (n == nullptr) {
4091         continue;
4092       }
4093     } else if (n->Opcode() == Op_StrCompressedCopy ||
4094                n->Opcode() == Op_EncodeISOArray) {
4095       // get the memory projection
4096       n = n->find_out_with(Op_SCMemProj);
4097       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4098     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4099                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4100       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4101     } else {
4102       assert(n->is_Mem(), "memory node required.");
4103       Node *addr = n->in(MemNode::Address);
4104       const Type *addr_t = igvn->type(addr);
4105       if (addr_t == Type::TOP) {
4106         continue;
4107       }
4108       assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4109       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4110       assert((uint)alias_idx < new_index_end, "wrong alias index");
4111       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4112       if (_compile->failing()) {
4113         return;
4114       }
4115       if (mem != n->in(MemNode::Memory)) {
4116         // We delay the memory edge update since we need the old one in the
4117         // MergeMem code below when instance memory slices are separated.
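// Added note: the new memory value is only stashed here via set_map(n, mem);
// Phase 4 (the loop over ideal_nodes at the end of this method) reads it
// back with get_map() and performs the actual set_req(MemNode::Memory, ...).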
4118         set_map(n, mem);
4119       }
4120       if (n->is_Load()) {
4121         continue; // don't push users
4122       } else if (n->is_LoadStore()) {
4123         // get the memory projection
4124         n = n->find_out_with(Op_SCMemProj);
4125         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4126       }
4127     }
4128     // push user on appropriate worklist
4129     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4130       Node *use = n->fast_out(i);
4131       if (use->is_Phi() || use->is_ClearArray()) {
4132         memnode_worklist.append_if_missing(use);
4133       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4134         if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4135           continue;
4136         }
4137         memnode_worklist.append_if_missing(use);
4138       } else if (use->is_MemBar()) {
4139         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4140           memnode_worklist.append_if_missing(use);
4141         }
4142 #ifdef ASSERT
4143       } else if (use->is_Mem()) {
4144         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4145       } else if (use->is_MergeMem()) {
4146         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4147       } else if (use->Opcode() == Op_EncodeISOArray) {
4148         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4149           // EncodeISOArray overwrites destination array
4150           memnode_worklist.append_if_missing(use);
4151         }
4152       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4153                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4154         // store_unknown_inline overwrites destination array
4155         memnode_worklist.append_if_missing(use);
4156       } else {
4157         uint op = use->Opcode();
4158         if ((use->in(MemNode::Memory) == n) &&
4159             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4160           // They overwrite the memory edge corresponding to the destination array.
4161           memnode_worklist.append_if_missing(use);
4162         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4163                      op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4164                      op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4165                      op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4166           n->dump();
4167           use->dump();
4168           assert(false, "EA: missing memory path");
4169         }
4170 #endif
4171       }
4172     }
4173   }
4174
4175   // Phase 3: Process MergeMem nodes from mergemem_worklist.
4176   //          Walk each memory slice moving the first node encountered of each
4177   //          instance type to the input corresponding to its alias index.
4178   uint length = mergemem_worklist.length();
4179   for (uint next = 0; next < length; ++next) {
4180     MergeMemNode* nmm = mergemem_worklist.at(next);
4181     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4182     // Note: we don't want to use MergeMemStream here because we only want to
4183     // scan inputs which exist at the start, not ones we add during processing.
4184     // Note 2: MergeMem may already contain instance memory slices added
4185     // during the find_inst_mem() call when memory nodes were processed above.
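// Hedged sketch of the per-slice fixup below (illustrative): if a store
// sitting on slice i now has an address type whose alias index became a
// new instance index idx, the store is moved to input idx and slice i is
// refilled from the store's memory chain.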
4186     igvn->hash_delete(nmm);
4187     uint nslices = MIN2(nmm->req(), new_index_start);
4188     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4189       Node* mem = nmm->in(i);
4190       Node* cur = nullptr;
4191       if (mem == nullptr || mem->is_top()) {
4192         continue;
4193       }
4194       // First, update mergemem by moving memory nodes to corresponding slices
4195       // if their type became more precise since this mergemem was created.
4196       while (mem->is_Mem()) {
4197         const Type *at = igvn->type(mem->in(MemNode::Address));
4198         if (at != Type::TOP) {
4199           assert(at->isa_ptr() != nullptr, "pointer type required.");
4200           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4201           if (idx == i) {
4202             if (cur == nullptr) {
4203               cur = mem;
4204             }
4205           } else {
4206             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4207               nmm->set_memory_at(idx, mem);
4208             }
4209           }
4210         }
4211         mem = mem->in(MemNode::Memory);
4212       }
4213       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4214       // Find any instance of the current type if we haven't already
4215       // encountered a memory slice of this instance along the memory chain.
4216       for (uint ni = new_index_start; ni < new_index_end; ni++) {
4217         if ((uint)_compile->get_general_index(ni) == i) {
4218           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4219           if (nmm->is_empty_memory(m)) {
4220             Node* result = find_inst_mem(mem, ni, orig_phis);
4221             if (_compile->failing()) {
4222               return;
4223             }
4224             nmm->set_memory_at(ni, result);
4225           }
4226         }
4227       }
4228     }
4229     // Find the rest of the instance values.
4230     for (uint ni = new_index_start; ni < new_index_end; ni++) {
4231       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4232       Node* result = step_through_mergemem(nmm, ni, tinst);
4233       if (result == nmm->base_memory()) {
4234         // Didn't find instance memory, search through general slice recursively.
4235         result = nmm->memory_at(_compile->get_general_index(ni));
4236         result = find_inst_mem(result, ni, orig_phis);
4237         if (_compile->failing()) {
4238           return;
4239         }
4240         nmm->set_memory_at(ni, result);
4241       }
4242     }
4243     igvn->hash_insert(nmm);
4244     record_for_optimizer(nmm);
4245   }
4246
4247   // Phase 4: Update the inputs of non-instance memory Phis and
4248   //          the Memory input of memnodes.
4249   // First update the inputs of any non-instance Phi's from
4250   // which we split out an instance Phi. Note we don't have
4251   // to recursively process Phi's encountered on the input memory
4252   // chains as is done in split_memory_phi() since they will
4253   // also be processed here.
4254   for (int j = 0; j < orig_phis.length(); j++) {
4255     PhiNode *phi = orig_phis.at(j);
4256     int alias_idx = _compile->get_alias_index(phi->adr_type());
4257     igvn->hash_delete(phi);
4258     for (uint i = 1; i < phi->req(); i++) {
4259       Node *mem = phi->in(i);
4260       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4261       if (_compile->failing()) {
4262         return;
4263       }
4264       if (mem != new_mem) {
4265         phi->set_req(i, new_mem);
4266       }
4267     }
4268     igvn->hash_insert(phi);
4269     record_for_optimizer(phi);
4270   }
4271
4272   // Update the memory inputs of MemNodes with the value we computed
4273   // in Phase 2 and move the memory users of stores to the corresponding memory slices.
4274   // Disable memory split verification code until the fix for 6984348.
4275   // Currently it produces false negative results since it does not cover all cases.
4276 #if 0 // ifdef ASSERT 4277 visited.Reset(); 4278 Node_Stack old_mems(arena, _compile->unique() >> 2); 4279 #endif 4280 for (uint i = 0; i < ideal_nodes.size(); i++) { 4281 Node* n = ideal_nodes.at(i); 4282 Node* nmem = get_map(n->_idx); 4283 assert(nmem != nullptr, "sanity"); 4284 if (n->is_Mem()) { 4285 #if 0 // ifdef ASSERT 4286 Node* old_mem = n->in(MemNode::Memory); 4287 if (!visited.test_set(old_mem->_idx)) { 4288 old_mems.push(old_mem, old_mem->outcnt()); 4289 } 4290 #endif 4291 assert(n->in(MemNode::Memory) != nmem, "sanity"); 4292 if (!n->is_Load()) { 4293 // Move memory users of a store first. 4294 move_inst_mem(n, orig_phis); 4295 } 4296 // Now update memory input 4297 igvn->hash_delete(n); 4298 n->set_req(MemNode::Memory, nmem); 4299 igvn->hash_insert(n); 4300 record_for_optimizer(n); 4301 } else { 4302 assert(n->is_Allocate() || n->is_CheckCastPP() || 4303 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 4304 } 4305 } 4306 #if 0 // ifdef ASSERT 4307 // Verify that memory was split correctly 4308 while (old_mems.is_nonempty()) { 4309 Node* old_mem = old_mems.node(); 4310 uint old_cnt = old_mems.index(); 4311 old_mems.pop(); 4312 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 4313 } 4314 #endif 4315 } 4316 4317 #ifndef PRODUCT 4318 int ConnectionGraph::_no_escape_counter = 0; 4319 int ConnectionGraph::_arg_escape_counter = 0; 4320 int ConnectionGraph::_global_escape_counter = 0; 4321 4322 static const char *node_type_names[] = { 4323 "UnknownType", 4324 "JavaObject", 4325 "LocalVar", 4326 "Field", 4327 "Arraycopy" 4328 }; 4329 4330 static const char *esc_names[] = { 4331 "UnknownEscape", 4332 "NoEscape", 4333 "ArgEscape", 4334 "GlobalEscape" 4335 }; 4336 4337 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 4338 NodeType nt = node_type(); 4339 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 4340 if (print_state) { 4341 EscapeState es = escape_state(); 4342 EscapeState fields_es = fields_escape_state(); 4343 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 4344 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 4345 out->print("NSR "); 4346 } 4347 } 4348 } 4349 4350 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 4351 dump_header(print_state, out); 4352 if (is_Field()) { 4353 FieldNode* f = (FieldNode*)this; 4354 if (f->is_oop()) { 4355 out->print("oop "); 4356 } 4357 if (f->offset() > 0) { 4358 out->print("+%d ", f->offset()); 4359 } 4360 out->print("("); 4361 for (BaseIterator i(f); i.has_next(); i.next()) { 4362 PointsToNode* b = i.get(); 4363 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 4364 } 4365 out->print(" )"); 4366 } 4367 out->print("["); 4368 for (EdgeIterator i(this); i.has_next(); i.next()) { 4369 PointsToNode* e = i.get(); 4370 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 4371 } 4372 out->print(" ["); 4373 for (UseIterator i(this); i.has_next(); i.next()) { 4374 PointsToNode* u = i.get(); 4375 bool is_base = false; 4376 if (PointsToNode::is_base_use(u)) { 4377 is_base = true; 4378 u = PointsToNode::get_use_node(u)->as_Field(); 4379 } 4380 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 4381 } 4382 out->print(" ]] "); 4383 if (_node == nullptr) { 4384 out->print("<null>%s", newline ? "\n" : ""); 4385 } else { 4386 _node->dump(newline ? 
"\n" : "", false, out); 4387 } 4388 } 4389 4390 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 4391 bool first = true; 4392 int ptnodes_length = ptnodes_worklist.length(); 4393 for (int i = 0; i < ptnodes_length; i++) { 4394 PointsToNode *ptn = ptnodes_worklist.at(i); 4395 if (ptn == nullptr || !ptn->is_JavaObject()) { 4396 continue; 4397 } 4398 PointsToNode::EscapeState es = ptn->escape_state(); 4399 if ((es != PointsToNode::NoEscape) && !Verbose) { 4400 continue; 4401 } 4402 Node* n = ptn->ideal_node(); 4403 if (n->is_Allocate() || (n->is_CallStaticJava() && 4404 n->as_CallStaticJava()->is_boxing_method())) { 4405 if (first) { 4406 tty->cr(); 4407 tty->print("======== Connection graph for "); 4408 _compile->method()->print_short_name(); 4409 tty->cr(); 4410 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 4411 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 4412 tty->cr(); 4413 first = false; 4414 } 4415 ptn->dump(); 4416 // Print all locals and fields which reference this allocation 4417 for (UseIterator j(ptn); j.has_next(); j.next()) { 4418 PointsToNode* use = j.get(); 4419 if (use->is_LocalVar()) { 4420 use->dump(Verbose); 4421 } else if (Verbose) { 4422 use->dump(); 4423 } 4424 } 4425 tty->cr(); 4426 } 4427 } 4428 } 4429 4430 void ConnectionGraph::print_statistics() { 4431 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 4432 } 4433 4434 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 4435 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 4436 return; 4437 } 4438 for (int next = 0; next < java_objects_worklist.length(); ++next) { 4439 JavaObjectNode* ptn = java_objects_worklist.at(next); 4440 if (ptn->ideal_node()->is_Allocate()) { 4441 if (ptn->escape_state() == PointsToNode::NoEscape) { 4442 Atomic::inc(&ConnectionGraph::_no_escape_counter); 4443 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 4444 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 4445 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 4446 Atomic::inc(&ConnectionGraph::_global_escape_counter); 4447 } else { 4448 assert(false, "Unexpected Escape State"); 4449 } 4450 } 4451 } 4452 } 4453 4454 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 4455 if (_compile->directive()->TraceEscapeAnalysisOption) { 4456 assert(ptn != nullptr, "should not be null"); 4457 assert(reason != nullptr, "should not be null"); 4458 ptn->dump_header(true); 4459 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 4460 PointsToNode::EscapeState new_fields_es = fields ? 
es : ptn->fields_escape_state(); 4461 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 4462 } 4463 } 4464 4465 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 4466 if (_compile->directive()->TraceEscapeAnalysisOption) { 4467 stringStream ss; 4468 ss.print("propagated from: "); 4469 from->dump(true, &ss, false); 4470 return ss.as_string(); 4471 } else { 4472 return nullptr; 4473 } 4474 } 4475 4476 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 4477 if (_compile->directive()->TraceEscapeAnalysisOption) { 4478 stringStream ss; 4479 ss.print("escapes as arg to:"); 4480 call->dump("", false, &ss); 4481 return ss.as_string(); 4482 } else { 4483 return nullptr; 4484 } 4485 } 4486 4487 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 4488 if (_compile->directive()->TraceEscapeAnalysisOption) { 4489 stringStream ss; 4490 ss.print("is merged with other object: "); 4491 other->dump_header(true, &ss); 4492 return ss.as_string(); 4493 } else { 4494 return nullptr; 4495 } 4496 } 4497 4498 #endif 4499 4500 void ConnectionGraph::record_for_optimizer(Node *n) { 4501 _igvn->_worklist.push(n); 4502 _igvn->add_users_to_worklist(n); 4503 }
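// Added usage note (commentary, not part of the original sources): callers
// pair a type or edge rewrite with record_for_optimizer() so IGVN revisits
// both the node and its users, e.g. as split_AddP() does above:
//   igvn->set_type(addp, tinst);
//   record_for_optimizer(addp);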