/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), ReduceAllocationMerges ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
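  //
  // Illustrative example (assumed Java source, not from this file): each
  // statement below would produce one of the macro nodes the loop checks for:
  //
  //   Point p = new Point(x, y);        // Allocate
  //   synchronized (p) { p.x++; }       // Lock/Unlock on a non-Parm/non-Con object
  //   Integer i = Integer.valueOf(v);   // boxing method call
  //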
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // Load/Store at mark word address is at offset 0 so has no AddP which confuses EA
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph.
    // Called only once per ideal node since ideal_nodes is a Unique_Node_List.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and Java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so if one of its input is non
        // escaping, we can record a unique type
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
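  //
  // One illustrative example of such an optimization: with OptimizePtrCompare,
  // a CmpP of two distinct non-escaping allocations can be constant folded
  // because the two objects can never be the same:
  //
  //   Object a = new Object();
  //   Object b = new Object();
  //   if (a == b) { ... }               // comparison folds to false
  //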
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Remove reducible allocation merges from ideal graph
  if (ReduceAllocationMerges && reducible_merges.size() > 0) {
    bool delay = _igvn->delay_transform();
    _igvn->set_delay_transform(true);
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      reduce_phi(n->as_Phi());
      if (C->failing()) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        return false;
      }
    }
    _igvn->set_delay_transform(delay);
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge and
// no input to the Phi is nullable.
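//
// Illustrative Java shape of such a merge (assumed example):
//
//   Point p = cond ? new Point(0) : new Point(1); // Phi of two allocations
//   ...                                           // 'p' only kept alive for
//   ...                                           // debug info at safepoints
//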
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  // Check if there is a scalar replaceable allocate in the Phi
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    // Right now we can't restore a "null" pointer during deoptimization
    const Type* inp_t = _igvn->type(ophi->in(i));
    if (inp_t == nullptr || inp_t->make_oopptr() == nullptr || inp_t->make_oopptr()->maybe_null()) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Input %d is nullable.", ophi->_idx, _invocation, i);)
      return false;
    }

    // We are looking for at least one SR object in the merge
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      assert(ptn->ideal_node() != nullptr && ptn->ideal_node()->is_Allocate(), "sanity");
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// Check if we are able to untangle the merge. Right now we only reduce Phis
// which are only used as debug information.
bool ConnectionGraph::can_reduce_phi_check_users(PhiNode* ophi) const {
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(ophi)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", ophi->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", ophi->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", ophi->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_phi_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying
  // with RAM (ReduceAllocationMerges) disabled.
  if (!_compile->do_reduce_allocation_merges()) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr || phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_instptr() == nullptr ||
      !phi_t->make_ptr()->isa_instptr()->klass_is_exact()) {
    NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can NOT reduce Phi %d during invocation %d because it's nullable.", ophi->_idx, _invocation); })
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_phi_check_users(ophi)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

void ConnectionGraph::reduce_phi_on_field_access(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // Iterate over Phi outputs looking for an AddP
  for (int j = ophi->outcnt()-1; j >= 0;) {
    Node* previous_addp = ophi->raw_out(j);
    uint num_edges = 1;
    if (previous_addp->is_AddP()) {
      // All AddPs are present in the connection graph
      FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
      num_edges = previous_addp->in(AddPNode::Address) == previous_addp->in(AddPNode::Base) ? 2 : 1;

      // Iterate over AddP looking for a Load
      for (int k = previous_addp->outcnt()-1; k >= 0;) {
        Node* previous_load = previous_addp->raw_out(k);
        if (previous_load->is_Load()) {
          Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);
          assert(data_phi != nullptr, "Output of split_through_phi is null.");
          assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
          _igvn->replace_node(previous_load, data_phi);

          // Push the newly created AddP on alloc_worklist and patch
          // the connection graph. Note that the changes in the CG below
          // won't affect the ES of objects since the new nodes have the
          // same status as the old ones.
          if (data_phi != nullptr && data_phi->is_Phi()) {
            for (uint i = 1; i < data_phi->req(); i++) {
              Node* new_load = data_phi->in(i);
              if (new_load->is_Load()) {
                Node* new_addp = new_load->in(MemNode::Address);
                Node* base = get_addp_base(new_addp);

                // The base might not be something that we can create a unique
                // type for. If that's the case we are done with that input.
                PointsToNode* jobj_ptn = unique_java_object(base);
                if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
                  continue;
                }

                // Push to alloc_worklist since the base has a unique type
                alloc_worklist.append_if_missing(new_addp);

                // Now let's add the node to the connection graph
                _nodes.at_grow(new_addp->_idx, nullptr);
                add_field(new_addp, fn->escape_state(), fn->offset());
                add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

                // If the load doesn't load an object then it won't be
                // part of the connection graph
                PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
                if (curr_load_ptn != nullptr) {
                  _nodes.at_grow(new_load->_idx, nullptr);
                  add_local_var(new_load, curr_load_ptn->escape_state());
                  add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
                }
              }
            }
          }
        }
        --k;
        k = MIN2(k, (int)previous_addp->outcnt()-1);
      }

      // Remove the old AddP from the processing list because it's dead now
      alloc_worklist.remove_if_existing(previous_addp);
    }
    j -= num_edges;
    j = MIN2(j, (int)ophi->outcnt()-1);
  }
}

// This method will create a SafePointScalarObjectNode for each combination of
// scalar replaceable allocation in 'ophi' and SafePoint node in 'safepoints'.
// The method will create a SafePointScalarMergeNode for each combination of
// 'ophi' and SafePoint node in 'safepoints'.
// Each SafePointScalarMergeNode created here may describe multiple scalar
// replaced objects - check detailed description in SafePointScalarMergeNode
// class header.
//
// This method will set entries in the Phi that are scalar replaceable to 'null'.
void ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi, Unique_Node_List* safepoints) {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  uint number_of_sr_objects = 0;
  PhaseMacroExpand mexp(*_igvn);

  _igvn->hash_delete(ophi);

  // Fill in the 'selector' Phi. If index 'i' of the selector is:
  // -> a '-1' constant, the i'th input of the original Phi is NSR.
  // -> an 'x' constant >= 0, the i'th input of the original Phi will be SR and
  //    the info about the scalarized object will be at index x of
  //    ObjectMergeValue::possible_objects
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  // Update the debug information of all safepoints in turn
  for (uint spi = 0; spi < safepoints->size(); spi++) {
    SafePointNode* sfpt = safepoints->at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(ophi);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      Unique_Node_List value_worklist;
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
      guarantee(value_worklist.size() == 0, "Unimplemented: Valhalla support for 8287061");
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), ophi);
    _igvn->_worklist.push(sfpt);
  }

  // Now we can change ophi since we don't need to know the types
  // of the input allocations anymore.
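  //
  // Worked example (illustrative): if ophi = Phi(Region, Alloc1, NSR, Alloc2)
  // and both allocations are scalar replaceable, the selector built above is
  // Phi(Region, 0, -1, 1) and the replacement Phi built below is
  // Phi(Region, null, NSR, null).
  //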
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  _igvn->replace_node(ophi, new_phi);
  _igvn->hash_insert(ophi);
  _igvn->_worklist.push(ophi);
}

void ConnectionGraph::reduce_phi(PhiNode* ophi) {
  Unique_Node_List safepoints;

  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);

    // All SafePoint nodes using the same Phi node use the same debug
    // information (regarding the Phi). Furthermore, reducing the Phi used by a
    // SafePoint requires changing the Phi. Therefore, I collect all safepoints
    // and patch them all at once later.
    if (use->is_SafePoint()) {
      safepoints.push(use->as_SafePoint());
    } else {
      assert(false, "Unexpected use of reducible Phi.");
    }
  }

  if (safepoints.size() > 0) {
    reduce_phi_on_safepoints(ophi, &safepoints);
  }
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  Unique_Node_List ideal_nodes;

  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain_sig();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
        bool returns_oop = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
          ProjNode* pn = n->fast_out(i)->as_Proj();
          if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
            returns_oop = true;
          }
        }
        if (returns_oop) {
          add_call_node(n->as_Call());
        }
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
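      // (For example, a CMoveP input may be a Phi that has not been visited
      // yet on this pass and so has no PointsTo node; add_final_edges() will
      // add the edges once every node is registered.)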
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      PointsToNode* ptn_con = add_java_object(n, es);
      set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != nullptr) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_VectorizedHashCode:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
      set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during first iteration because some could be
      // not defined yet, defer to final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != nullptr, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
             n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_VectorizedHashCode:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsic do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != nullptr, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != nullptr, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
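      // (Blackhole nodes are created for compiler blackholes, e.g. JMH's
      // Blackhole.consume(Object); treating the argument as globally escaping
      // keeps the object and stores to it alive.)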
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == nullptr) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == nullptr) {
    n->dump(1);
    assert(adr_type != nullptr, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
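    // (This covers, for example, an Unsafe store through a raw address: the
    // stored value was marked GlobalEscape above, and if the address still
    // resolves to a known oop field we also record the points-to edge.)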
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == nullptr) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0 ||
             strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
      if (es == PointsToNode::GlobalEscape) {
        set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
      }
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_cc();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != nullptr &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain_sig();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == nullptr) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue;
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != nullptr, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a Ptr");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
                                aat->isa_aryptr()->is_flat() &&
                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != nullptr &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != nullptr, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != nullptr) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != nullptr) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != nullptr &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != nullptr) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != nullptr, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != nullptr &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              }
            }
          }
        }
        if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call; assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
        }
      }
    }
  }
}

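// A hedged Java-level sketch (illustrative only) of the worst-case default
// branch above: an argument passed to a call we cannot analyze must be
// assumed to escape globally.
//
//   void caller() {
//     Box b = new Box();        // candidate for NoEscape...
//     unknownLibraryCall(b);    // ...but without bytecode analysis the
//   }                           // argument is marked GlobalEscape
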
// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations when something went wrong and
  // bail out of Escape Analysis.
  // Also limit the build time to 20 sec (60 in debug VM); see the
  // EscapeAnalysisTimeout flag.
#define GRAPH_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer build_time;
  build_time.start();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  build_time.stop();
  _build_time = build_time.seconds();
  _build_iterations = iterations;

  // Bail out if we passed the limits.
  if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != nullptr) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
           _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bail out (no changes to the ideal graph were made).
    return false;
  }

#undef GRAPH_BUILD_ITER_LIMIT

  // Find fields initialized by null for non-escaping Allocations.
  int non_escaped_length = non_escaped_allocs_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to the null object does not change escape states
        // since it does not escape. Also no fields are added to the null object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != nullptr)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

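// A worked example (hypothetical numbers) of the sampling-based timeout test
// above: with SAMPLE_SIZE == 4, if the last 4 java_objects_worklist entries
// took 0.02 sec, then time_per_iter == 0.005 sec. With 10,000 entries left,
// time_until_end == 50 sec; if start_time + 50 >= EscapeAnalysisTimeout the
// build gives up early instead of running to completion.
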
// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_allocs_worklist.length() - 1; next >= 0; --next) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_allocs_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values_phantom(ptn);
    }
  }
  return (non_escaped_allocs_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy()) {
        continue;
      }
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) { // null object does not have field edges
        continue;
      }
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj)) {
      continue; // No new edge added, there was such edge already.
    }
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) { // null object does not have field edges
              continue;
            }
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to values stored in fields.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

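// A Java-level sketch (illustrative only) of why stores to an arraycopy source
// must become visible through loads from the destination:
//
//   Object o = new Object();
//   src[0] = o;                           // store into source field
//   System.arraycopy(src, 0, dst, 0, 1);  // copies the reference
//   Object p = dst[0];                    // p may point to o, so the graph
//                                         // must connect dst's fields to o
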
// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // null object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since a store to a destination object field
        // does not update the value in the source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate()) {
        return 0;
      }
      assert(base == null_obj, "only null ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

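// A hedged Java-level illustration of a field with an unknown value: when no
// store to the field is visible to the analysis, loads from it must be assumed
// to yield anything, which is what the edge to phantom_obj encodes.
//
//   Holder h = externallyProvided();  // base not an Allocate in this graph
//   Object v = h.f;                   // h.f has no recorded store:
//                                     // field gets an edge to phantom_obj
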
// Find fields' initializing values for allocations.
int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  PointsToNode* init_val = phantom_obj;
  Node* alloc = pta->ideal_node();

  // Do nothing for Allocate nodes since their field values are
  // "known" unless they are initialized by arraycopy/clone.
  if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
    if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
      // Non-flat inline type arrays are initialized with
      // the default value instead of null. Handle them here.
      init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
      assert(init_val != nullptr, "default value should be registered");
    } else {
      return 0;
    }
  }
  // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
  assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
#ifdef ASSERT
  if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
    const char* name = alloc->as_CallStaticJava()->_name;
    assert(strncmp(name, "_multianewarray", 15) == 0 ||
           strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
  }
#endif
  int new_edges = 0;
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get();
    if (field->is_Field() && field->as_Field()->is_oop()) {
      if (add_edge(field, init_val)) {
        // New edge was added
        new_edges++;
        add_field_uses_to_worklist(field->as_Field());
      }
    }
  }
  return new_edges;
}

// Find fields' initializing values for allocations.
int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  Node* alloc = pta->ideal_node();
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
    return 0;
  }
  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;
  int new_edges = 0;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding null value if it is not recorded.
  // Connection Graph does not record a default initialization by null
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop()) {
      continue; // Not oop field
    }
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element;
        // always add a reference to null to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = nullptr;
        if (ini != nullptr) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which has not been eliminated yet.
          if (store != nullptr && store->is_Store() &&
              store->as_Store()->memory_type() == ft) {
            value = store->in(MemNode::ValueIn);
#ifdef ASSERT
            if (VerifyConnectionGraph) {
              // Verify that AddP already points to all objects the value points to.
              PointsToNode* val = ptnode_adr(value->_idx);
              assert((val != nullptr), "should be processed already");
              PointsToNode* missed_obj = nullptr;
              if (val->is_JavaObject()) {
                if (!field->points_to(val->as_JavaObject())) {
                  missed_obj = val;
                }
              } else {
                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
                  tty->print_cr("----------init store has invalid value -----");
                  store->dump();
                  val->dump();
                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
                }
                for (EdgeIterator j(val); j.has_next(); j.next()) {
                  PointsToNode* obj = j.get();
                  if (obj->is_JavaObject()) {
                    if (!field->points_to(obj->as_JavaObject())) {
                      missed_obj = obj;
                      break;
                    }
                  }
                }
              }
              if (missed_obj != nullptr) {
                tty->print_cr("----------field---------------------------------");
                field->dump();
                tty->print_cr("----------missed reference to object------------");
                missed_obj->dump();
                tty->print_cr("----------object referenced by init store-------");
                store->dump();
                val->dump();
                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
              }
            }
#endif
          } else {
            // There could be initializing stores which follow allocation.
            // For example, a volatile field store is not collected
            // by Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add initial value null so
            // that compare pointers optimization works correctly.
          }
        }
        if (value == nullptr) {
          // A field's initializing value was not recorded. Add null.
          if (add_edge(field, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(field->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}

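// A Java-level sketch (illustrative only) of the default-null initialization
// handled above: with no captured initializing store, the field's value set
// must include null so that pointer-compare optimizations stay correct.
//
//   A a = new A();        // a.f was never stored to: its only possible
//   if (a.f == null) {    // value is the default null, and this branch
//     ...                 // must be seen as always taken
//   }
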
// Adjust scalar_replaceable state after Connection Graph is built.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
  // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
  // returns true. If one of the constraints in this method sets 'jobj' to NSR
  // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
  // input, 'adjust_scalar_replaceable_state' will eventually be called with
  // that other object and the Phi will become a reducible Phi.
  // There could be multiple merges involving the same jobj.
  Unique_Node_List candidates;

  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    if (use->is_Arraycopy()) {
      continue;
    }
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable(), "sanity");
      // 1. An object is not scalar replaceable if the field into which it is
      // stored has unknown offset (stored into unknown element of an array).
      if (field->offset() == Type::OffsetBot) {
        set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
        return;
      }
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // 2. An object is not scalar replaceable if the field into which it is
        // stored has multiple bases, one of which is null.
        if ((base == null_obj) && (field->base_count() > 1)) {
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
          return;
        }
        // 2.5. An object is not scalar replaceable if the field into which it is
        // stored has an NSR base.
        if (!base->scalar_replaceable()) {
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
          return;
        }
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 3. An object is not scalar replaceable if it is merged with other objects
    // and we can't remove the merge.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        Node* use_n = use->ideal_node();

        // If it's already a candidate or confirmed reducible merge we can skip verification
        if (candidates.member(use_n)) {
          continue;
        } else if (reducible_merges.member(use_n)) {
          candidates.push(use_n);
          continue;
        }

        if (ReduceAllocationMerges && use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
          candidates.push(use_n);
        } else {
          // Mark all objects as NSR if we can't remove the merge
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
          set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
        }
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    if (j.get()->is_Arraycopy()) {
      continue;
    }

    // Non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 4. An object is not scalar replaceable if it has a field with unknown
    // offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
      return;
    }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    // accesses its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();

    // Test for an unsafe access that was parsed as maybe off heap
    // (with a CheckCastPP to raw memory).
    assert(n->is_AddP(), "expect an address computation");
    if (n->in(AddPNode::Base)->is_top() &&
        n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
      assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
      assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
      set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
      return;
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
        set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
        return;
      }
    }

    // 6. Or the address may point to more than one object. This may produce
    // a false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happened on different control branches.
    //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in the next cases:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
    //
    if (field->base_count() > 1 && candidates.size() == 0) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should be also
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
          set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
        }
      }

      if (!jobj->scalar_replaceable()) {
        return;
      }
    }
  }

  // The candidate is truly a reducible merge only if none of the other
  // constraints ruled it as NSR. There could be multiple merges involving the
  // same jobj.
  assert(jobj->scalar_replaceable(), "sanity");
  for (uint i = 0; i < candidates.size(); i++) {
    Node* candidate = candidates.at(i);
    reducible_merges.push(candidate);
  }
}

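// A Java-level sketch (illustrative only) of a merge that ReduceAllocationMerges
// may keep reducible: two non-escaping allocations flowing into one Phi.
//
//   Point p = cond ? new Point(1, 2) : new Point(3, 4); // Phi(o1, o2)
//   return p.x;  // without merge reduction both allocations become NSR;
//                // with it, the Phi can be a "reducible merge" candidate
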
// Propagate NSR (Not Scalar Replaceable) state.
void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
  int jobj_length = jobj_worklist.length();
  bool found_nsr_alloc = true;
  while (found_nsr_alloc) {
    found_nsr_alloc = false;
    for (int next = 0; next < jobj_length; ++next) {
      JavaObjectNode* jobj = jobj_worklist.at(next);
      for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
        PointsToNode* use = i.get();
        if (use->is_Field()) {
          FieldNode* field = use->as_Field();
          assert(field->is_oop() && field->scalar_replaceable(), "sanity");
          assert(field->offset() != Type::OffsetBot, "sanity");
          for (BaseIterator i(field); i.has_next(); i.next()) {
            PointsToNode* base = i.get();
            // An object is not scalar replaceable if the field into which
            // it is stored has an NSR base.
            if ((base != null_obj) && !base->scalar_replaceable()) {
              set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
              found_nsr_alloc = true;
              break;
            }
          }
        }
      }
    }
  }
}

#ifdef ASSERT
void ConnectionGraph::verify_connection_graph(
                 GrowableArray<PointsToNode*>&   ptnodes_worklist,
                 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
                 GrowableArray<Node*>&           addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_allocs_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final.
  int length = non_escaped_allocs_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
  assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
      if (field->edge_count() == 0) {
        tty->print_cr("----------field does not have references----------");
        field->dump();
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          tty->print_cr("----------field has next base---------------------");
          base->dump();
          if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
            tty->print_cr("----------base has fields-------------------------");
            for (EdgeIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
            tty->print_cr("----------base has references---------------------");
            for (UseIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
          }
        }
        for (UseIterator i(field); i.has_next(); i.next()) {
          i.get()->dump();
        }
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif

// Optimize ideal graph.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing ideal graph.
    int cnt = C->macro_count();
    for (int i = 0; i < cnt; i++) {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_non_esc_obj()) {
          const Type* obj_type = igvn->type(alock->obj_node());
          if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by the lock coarsening
            // code during the first IGVN before EA. Replace the coarsened
            // flag so that all associated locks/unlocks are eliminated.
#ifdef ASSERT
            alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
#endif
            alock->set_non_esc_obj();
          }
        }
      }
    }
  }

  if (OptimizePtrCompare) {
    for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
      Node *n = ptr_cmp_worklist.at(i);
      const TypeInt* tcmp = optimize_ptr_compare(n);
      if (tcmp->singleton()) {
        Node* cmp = igvn->makecon(tcmp);
#ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, cmp);
      }
    }
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check
  // escape status of the associated AllocateNode and optimize out
  // the MemBarStoreStore node if the allocated object never escapes.
  for (int i = 0; i < storestore_worklist.length(); i++) {
    Node* storestore = storestore_worklist.at(i);
    Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
    if (alloc->is_Allocate() && not_global_escape(alloc)) {
      if (alloc->in(AllocateNode::InlineType) != nullptr) {
        // Non-escaping inline type buffer allocations don't require a membar
        storestore->as_MemBar()->remove(_igvn);
      } else {
        MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
        mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
        mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
        igvn->register_new_node_with_optimizer(mb);
        igvn->replace_node(storestore, mb);
      }
    }
  }
}

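// A Java-level sketch (illustrative only) of the lock elimination enabled
// above: synchronization on a provably non-escaping object has no observable
// effect, so the Lock/Unlock macro nodes can be removed.
//
//   Object lock = new Object();   // never escapes this method
//   synchronized (lock) {         // after EA: lock/unlock eliminated
//     counter++;
//   }
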
// Optimize object compares.
const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
  assert(OptimizePtrCompare, "sanity");
  assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
  const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
  const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
  const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]

  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");

  // Check simple cases first.
  if (jobj1 != nullptr) {
    if (jobj1->escape_state() == PointsToNode::NoEscape) {
      if (jobj1 == jobj2) {
        // Comparing the same not escaping object.
        return EQ;
      }
      Node* obj = jobj1->ideal_node();
      // Comparing not escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn2->points_to(jobj1)) {
        return NE; // This includes nullness check.
      }
    }
  }
  if (jobj2 != nullptr) {
    if (jobj2->escape_state() == PointsToNode::NoEscape) {
      Node* obj = jobj2->ideal_node();
      // Comparing not escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn1->points_to(jobj2)) {
        return NE; // This includes nullness check.
      }
    }
  }
  if (jobj1 != nullptr && jobj1 != phantom_obj &&
      jobj2 != nullptr && jobj2 != phantom_obj &&
      jobj1->ideal_node()->is_Con() &&
      jobj2->ideal_node()->is_Con()) {
    // Klass or String constants compare. Need to be careful with
    // compressed pointers - compare types of ConN and ConP instead of nodes.
    const Type* t1 = jobj1->ideal_node()->get_ptr_type();
    const Type* t2 = jobj2->ideal_node()->get_ptr_type();
    if (t1->make_ptr() == t2->make_ptr()) {
      return EQ;
    } else {
      return NE;
    }
  }
  if (ptn1->meet(ptn2)) {
    return UNKNOWN; // Sets are not disjoint
  }

  // Sets are disjoint.
  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
      (set2_has_unknown_ptr && set1_has_null_ptr)) {
    // Check nullness of unknown object.
    return UNKNOWN;
  }

  // Disjointness by itself is not sufficient since
  // alias analysis is not complete for escaped objects.
  // Disjoint sets are definitely unrelated only when
  // at least one set has only non-escaping allocations.
  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
    if (ptn1->non_escaping_allocation()) {
      return NE;
    }
  }
  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
    if (ptn2->non_escaping_allocation()) {
      return NE;
    }
  }
  return UNKNOWN;
}

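// A Java-level sketch (illustrative only) of a pointer compare the code above
// can fold: a fresh non-escaping allocation cannot be reached through an
// unrelated reference, so the comparison is statically "not equal".
//
//   Object a = new Object();  // NoEscape, unique allocation
//   if (a == param) { ... }   // param cannot point to a -> folded to false
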
// Connection Graph construction functions.

void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != nullptr) {
    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
  map_ideal_node(n, ptadr);
}

PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != nullptr) {
    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
    return ptadr;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
  map_ideal_node(n, ptadr);
  return ptadr;
}

void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != nullptr) {
    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  bool unsafe = false;
  bool is_oop = is_oop_field(n, offset, &unsafe);
  if (unsafe) {
    es = PointsToNode::GlobalEscape;
  }
  Compile* C = _compile;
  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
  map_ideal_node(n, field);
}

void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
                                    PointsToNode* src, PointsToNode* dst) {
  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  assert((src != null_obj) && (dst != null_obj), "not for ConP null");
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != nullptr) {
    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
  map_ideal_node(n, ptadr);
  // Add edge from arraycopy node to source object.
  (void)add_edge(ptadr, src);
  src->set_arraycopy_src();
  // Add edge from destination object to arraycopy node.
  (void)add_edge(dst, ptadr);
  dst->set_arraycopy_dst();
}

bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
  BasicType bt = T_INT;
  if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
        adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
      // OffsetBot is used to reference array's element. Ignore first AddP.
      if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    if (adr_type->isa_instptr()) {
      ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
      if (field != nullptr) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access
        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
            n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
            n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
            BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
          bt = T_OBJECT;
          (*unsafe) = true;
        }
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->is_aryptr()->elem();
        if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
          ciInlineKlass* vk = elemtype->inline_klass();
          field_offset += vk->first_field_offset();
          bt = vk->get_field_by_offset(field_offset, false)->layout_type();
        } else {
          bt = elemtype->array_element_basic_type();
        }
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
        bt = T_OBJECT;
      }
    }
  }
  // Note: T_NARROWOOP is not classed as a real reference type
  return (is_reference_type(bt) || bt == T_NARROWOOP);
}

// Returns unique pointed java object or null.
JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return nullptr;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == nullptr) {
    return nullptr;
  }
  if (ptn->is_JavaObject()) {
    return ptn->as_JavaObject();
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  JavaObjectNode* jobj = nullptr;
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      if (jobj == nullptr) {
        jobj = e->as_JavaObject();
      } else if (jobj != e) {
        return nullptr;
      }
    }
  }
  return jobj;
}

// Return true if this node points only to non-escaping allocations.
bool PointsToNode::non_escaping_allocation() {
  if (is_JavaObject()) {
    Node* n = ideal_node();
    if (n->is_Allocate() || n->is_CallStaticJava()) {
      return (escape_state() == PointsToNode::NoEscape);
    } else {
      return false;
    }
  }
  assert(is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      Node* n = e->ideal_node();
      if ((e->escape_state() != PointsToNode::NoEscape) ||
          !(n->is_Allocate() || n->is_CallStaticJava())) {
        return false;
      }
    }
  }
  return true;
}

// Return true if we know the node does not escape globally.
bool ConnectionGraph::not_global_escape(Node *n) {
  assert(!_collecting, "should not call during graph construction");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return false;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == nullptr) {
    return false; // not in congraph (e.g. ConI)
  }
  PointsToNode::EscapeState es = ptn->escape_state();
  // If we have already computed a value, return it.
  if (es >= PointsToNode::GlobalEscape) {
    return false;
  }
  if (ptn->is_JavaObject()) {
    return true; // (es < PointsToNode::GlobalEscape);
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
      return false;
    }
  }
  return true;
}


// Helper functions

// Return true if this node points to the specified node or to nodes it points to.
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  if (is_JavaObject()) {
    return (this == ptn);
  }
  assert(is_LocalVar() || is_Field(), "sanity");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    if (i.get() == ptn) {
      return true;
    }
  }
  return false;
}

// Return true if one node points to another.
bool PointsToNode::meet(PointsToNode* ptn) {
  if (this == ptn) {
    return true;
  } else if (ptn->is_JavaObject()) {
    return this->points_to(ptn->as_JavaObject());
  } else if (this->is_JavaObject()) {
    return ptn->points_to(this->as_JavaObject());
  }
  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  int ptn_count = ptn->edge_count();
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* this_e = i.get();
    for (int j = 0; j < ptn_count; j++) {
      if (this_e == ptn->edge(j)) {
        return true;
      }
    }
  }
  return false;
}

#ifdef ASSERT
// Return true if bases point to this java object.
bool FieldNode::has_base(JavaObjectNode* jobj) const {
  for (BaseIterator i(this); i.has_next(); i.next()) {
    if (i.get() == jobj) {
      return true;
    }
  }
  return false;
}
#endif

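// A small worked example (hypothetical nodes) for meet()/points_to(): if
// LocalVar v1 has edges {o1, o2} and LocalVar v2 has edges {o3}, then
// v1->meet(v2) is false (no shared edge), so the points-to sets are disjoint
// and optimize_ptr_compare() may be able to fold a v1 == v2 comparison.
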
bool ConnectionGraph::is_captured_store_address(Node* addp) {
  // Handle simple case first.
  assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
  if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
    return true;
  } else if (addp->in(AddPNode::Address)->is_Phi()) {
    for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
      Node* addp_use = addp->fast_out(i);
      if (addp_use->is_Store()) {
        for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
          if (addp_use->fast_out(j)->is_Initialize()) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  return adr_type->is_ptr()->flat_offset();
}

Node* ConnectionGraph::get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //       | |
  //       AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //      {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //      LoadKlass
  //       | |
  //       AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //      LoadNKlass
  //       |
  //      DecodeN
  //       | |
  //       AddP  ( base == address )
  //
  // case #9. Mixed unsafe access
  //    {instance}
  //        |
  //      CheckCastPP (raw)
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  Node *base = addp->in(AddPNode::Base);
  if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
    base = addp->in(AddPNode::Address);
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
3063 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3064 base = base->in(AddPNode::Address);
3065 }
3066 if (base->Opcode() == Op_CheckCastPP &&
3067 base->bottom_type()->isa_rawptr() &&
3068 _igvn->type(base->in(1))->isa_oopptr()) {
3069 base = base->in(1); // Case #9
3070 } else {
3071 Node* uncast_base = base->uncast();
3072 int opcode = uncast_base->Opcode();
3073 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3074 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3075 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3076 is_captured_store_address(addp), "sanity");
3077 }
3078 }
3079 return base;
3080 }
3081
3082 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3083 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3084 Node* addp2 = addp->raw_out(0);
3085 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3086 addp2->in(AddPNode::Base) == n &&
3087 addp2->in(AddPNode::Address) == addp) {
3088 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3089 //
3090 // Find the array's offset AddP to push it on the worklist first, so that
3091 // the array's element offset AddP (pushed second) is processed first.
3092 // This avoids inserting a CastPP for the array's offset.
3093 // Otherwise the inserted CastPP (LocalVar) would point to what
3094 // the AddP (Field) points to, which would be wrong since the
3095 // algorithm expects the CastPP to have the same points-to set
3096 // as the AddP's base CheckCastPP (LocalVar).
3097 //
3098 // ArrayAllocation
3099 // |
3100 // CheckCastPP
3101 // |
3102 // memProj (from ArrayAllocation CheckCastPP)
3103 // | ||
3104 // | || Int (element index)
3105 // | || | ConI (log(element size))
3106 // | || | /
3107 // | || LShift
3108 // | || /
3109 // | AddP (array's element offset)
3110 // | |
3111 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
3112 // | / /
3113 // AddP (array's offset)
3114 // |
3115 // Load/Store (memory operation on array's element)
3116 //
3117 return addp2;
3118 }
3119 return nullptr;
3120 }
3121
3122 //
3123 // Adjust the type and inputs of an AddP which computes the
3124 // address of a field of an instance
3125 //
3126 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3127 PhaseGVN* igvn = _igvn;
3128 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3129 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3130 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3131 if (t == nullptr) {
3132 // We are computing a raw address for a store captured by an Initialize;
3133 // compute an appropriate address type (cases #3 and #5).
3134 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3135 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3136 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3137 assert(offs != Type::OffsetBot, "offset must be a constant");
3138 if (base_t->isa_aryptr() != nullptr) {
3139 // In the case of a flat inline type array, each field has its
3140 // own slice so we need to extract the field being accessed from
3141 // the address computation
3142 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3143 } else {
3144 t = base_t->add_offset(offs)->is_oopptr();
3145 }
3146 }
3147 int inst_id = base_t->instance_id();
3148 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3149 "old type must be non-instance or match new type");
3150
3151 // The type 't' could be a subclass of 'base_t'.
3152 // As a result t->offset() could be larger than base_t's size, which would
3153 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3154 // constructor verifies the correctness of the offset.
3155 //
3156 // This could happen on a subclass's branch (from type profiling
3157 // inlining) which was not eliminated during parsing because the exactness
3158 // of the allocation type was not propagated to the subclass type check.
3159 //
3160 // Or the type 't' might not be related to 'base_t' at all.
3161 // That can happen when the CHA type differs from the MDO type on a dead path
3162 // (for example, from an instanceof check) which is not collapsed during parsing.
3163 //
3164 // Do nothing for such an AddP node and don't process its users since
3165 // this code branch will go away.
3166 //
3167 if (!t->is_known_instance() &&
3168 !base_t->maybe_java_subtype_of(t)) {
3169 return false; // bail out
3170 }
3171 const TypePtr* tinst = base_t->add_offset(t->offset());
3172 if (tinst->isa_aryptr() && t->isa_aryptr()) {
3173 // In the case of a flat inline type array, each field has its
3174 // own slice so we need to keep track of the field being accessed.
3175 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3176 // Keep array properties (not flat/null-free)
3177 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3178 if (tinst == nullptr) {
3179 return false; // Skip dead path with inconsistent properties
3180 }
3181 }
3182
3183 // Do NOT remove the next line: ensure a new alias index is allocated
3184 // for the instance type. Note: C++ will not remove it since the call
3185 // has side effect.
3186 int alias_idx = _compile->get_alias_index(tinst);
3187 igvn->set_type(addp, tinst);
3188 // record the allocation in the node map
3189 set_map(addp, get_map(base->_idx));
3190 // Set addp's Base and Address to 'base'.
3191 Node *abase = addp->in(AddPNode::Base);
3192 Node *adr = addp->in(AddPNode::Address);
3193 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3194 adr->in(0)->_idx == (uint)inst_id) {
3195 // Skip AddP cases #3 and #5.
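// (Nothing to rewire here: the raw address already uses the
// allocation's result projection directly as its Address input,
// so there is no base to redirect to the CheckCastPP.)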
3196 } else { 3197 assert(!abase->is_top(), "sanity"); // AddP case #3 3198 if (abase != base) { 3199 igvn->hash_delete(addp); 3200 addp->set_req(AddPNode::Base, base); 3201 if (abase == adr) { 3202 addp->set_req(AddPNode::Address, base); 3203 } else { 3204 // AddP case #4 (adr is array's element offset AddP node) 3205 #ifdef ASSERT 3206 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 3207 assert(adr->is_AddP() && atype != nullptr && 3208 atype->instance_id() == inst_id, "array's element offset should be processed first"); 3209 #endif 3210 } 3211 igvn->hash_insert(addp); 3212 } 3213 } 3214 // Put on IGVN worklist since at least addp's type was changed above. 3215 record_for_optimizer(addp); 3216 return true; 3217 } 3218 3219 // 3220 // Create a new version of orig_phi if necessary. Returns either the newly 3221 // created phi or an existing phi. Sets create_new to indicate whether a new 3222 // phi was created. Cache the last newly created phi in the node map. 3223 // 3224 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 3225 Compile *C = _compile; 3226 PhaseGVN* igvn = _igvn; 3227 new_created = false; 3228 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 3229 // nothing to do if orig_phi is bottom memory or matches alias_idx 3230 if (phi_alias_idx == alias_idx) { 3231 return orig_phi; 3232 } 3233 // Have we recently created a Phi for this alias index? 3234 PhiNode *result = get_map_phi(orig_phi->_idx); 3235 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) { 3236 return result; 3237 } 3238 // Previous check may fail when the same wide memory Phi was split into Phis 3239 // for different memory slices. Search all Phis for this region. 3240 if (result != nullptr) { 3241 Node* region = orig_phi->in(0); 3242 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 3243 Node* phi = region->fast_out(i); 3244 if (phi->is_Phi() && 3245 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 3246 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 3247 return phi->as_Phi(); 3248 } 3249 } 3250 } 3251 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 3252 if (C->do_escape_analysis() == true && !C->failing()) { 3253 // Retry compilation without escape analysis. 3254 // If this is the first failure, the sentinel string will "stick" 3255 // to the Compile object, and the C2Compiler will see it and retry. 3256 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3257 } 3258 return nullptr; 3259 } 3260 orig_phi_worklist.append_if_missing(orig_phi); 3261 const TypePtr *atype = C->get_adr_type(alias_idx); 3262 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype); 3263 C->copy_node_notes_to(result, orig_phi); 3264 igvn->set_type(result, result->bottom_type()); 3265 record_for_optimizer(result); 3266 set_map(orig_phi, result); 3267 new_created = true; 3268 return result; 3269 } 3270 3271 // 3272 // Return a new version of Memory Phi "orig_phi" with the inputs having the 3273 // specified alias index. 
3274 //
3275 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
3276 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3277 Compile *C = _compile;
3278 PhaseGVN* igvn = _igvn;
3279 bool new_phi_created;
3280 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3281 if (!new_phi_created) {
3282 return result;
3283 }
3284 GrowableArray<PhiNode *> phi_list;
3285 GrowableArray<uint> cur_input;
3286 PhiNode *phi = orig_phi;
3287 uint idx = 1;
3288 bool finished = false;
3289 while (!finished) {
3290 while (idx < phi->req()) {
3291 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
3292 if (mem != nullptr && mem->is_Phi()) {
3293 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3294 if (new_phi_created) {
3295 // Found a Phi for which we created a new split; push the current
3296 // one on the worklist and begin processing the new one.
3297 phi_list.push(phi);
3298 cur_input.push(idx);
3299 phi = mem->as_Phi();
3300 result = newphi;
3301 idx = 1;
3302 continue;
3303 } else {
3304 mem = newphi;
3305 }
3306 }
3307 if (C->failing()) {
3308 return nullptr;
3309 }
3310 result->set_req(idx++, mem);
3311 }
3312 #ifdef ASSERT
3313 // verify that the new Phi has an input for each input of the original
3314 assert( phi->req() == result->req(), "must have same number of inputs.");
3315 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3316 #endif
3317 // Check if all new phi's inputs have specified alias index.
3318 // Otherwise use old phi.
3319 for (uint i = 1; i < phi->req(); i++) {
3320 Node* in = result->in(i);
3321 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
3322 }
3323 // we have finished processing a Phi, see if there are any more to do
3324 finished = (phi_list.length() == 0 );
3325 if (!finished) {
3326 phi = phi_list.pop();
3327 idx = cur_input.pop();
3328 PhiNode *prev_result = get_map_phi(phi->_idx);
3329 prev_result->set_req(idx++, result);
3330 result = prev_result;
3331 }
3332 }
3333 return result;
3334 }
3335
3336 //
3337 // The next methods are derived from methods in MemNode.
3338 //
3339 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
3340 Node *mem = mmem;
3341 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
3342 // means an array I have not precisely typed yet. Do not do any
3343 // alias stuff with it any time soon.
3344 if (toop->base() != Type::AnyPtr &&
3345 !(toop->isa_instptr() &&
3346 toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
3347 toop->offset() == Type::OffsetBot)) {
3348 mem = mmem->memory_at(alias_idx);
3349 // Update input if it is progress over what we have now
3350 }
3351 return mem;
3352 }
3353
3354 //
3355 // Move memory users to their memory slices.
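// After a store has been moved to an instance slice, MergeMem and
// MemBar users that still reference it from the general slice are
// redirected to the memory state found for the general slice.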
3356 // 3357 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 3358 Compile* C = _compile; 3359 PhaseGVN* igvn = _igvn; 3360 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 3361 assert(tp != nullptr, "ptr type"); 3362 int alias_idx = C->get_alias_index(tp); 3363 int general_idx = C->get_general_index(alias_idx); 3364 3365 // Move users first 3366 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3367 Node* use = n->fast_out(i); 3368 if (use->is_MergeMem()) { 3369 MergeMemNode* mmem = use->as_MergeMem(); 3370 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 3371 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 3372 continue; // Nothing to do 3373 } 3374 // Replace previous general reference to mem node. 3375 uint orig_uniq = C->unique(); 3376 Node* m = find_inst_mem(n, general_idx, orig_phis); 3377 assert(orig_uniq == C->unique(), "no new nodes"); 3378 mmem->set_memory_at(general_idx, m); 3379 --imax; 3380 --i; 3381 } else if (use->is_MemBar()) { 3382 assert(!use->is_Initialize(), "initializing stores should not be moved"); 3383 if (use->req() > MemBarNode::Precedent && 3384 use->in(MemBarNode::Precedent) == n) { 3385 // Don't move related membars. 3386 record_for_optimizer(use); 3387 continue; 3388 } 3389 tp = use->as_MemBar()->adr_type()->isa_ptr(); 3390 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 3391 alias_idx == general_idx) { 3392 continue; // Nothing to do 3393 } 3394 // Move to general memory slice. 3395 uint orig_uniq = C->unique(); 3396 Node* m = find_inst_mem(n, general_idx, orig_phis); 3397 assert(orig_uniq == C->unique(), "no new nodes"); 3398 igvn->hash_delete(use); 3399 imax -= use->replace_edge(n, m, igvn); 3400 igvn->hash_insert(use); 3401 record_for_optimizer(use); 3402 --i; 3403 #ifdef ASSERT 3404 } else if (use->is_Mem()) { 3405 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 3406 // Don't move related cardmark. 3407 continue; 3408 } 3409 // Memory nodes should have new memory input. 3410 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 3411 assert(tp != nullptr, "ptr type"); 3412 int idx = C->get_alias_index(tp); 3413 assert(get_map(use->_idx) != nullptr || idx == alias_idx, 3414 "Following memory nodes should have new memory input or be on the same memory slice"); 3415 } else if (use->is_Phi()) { 3416 // Phi nodes should be split and moved already. 3417 tp = use->as_Phi()->adr_type()->isa_ptr(); 3418 assert(tp != nullptr, "ptr type"); 3419 int idx = C->get_alias_index(tp); 3420 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 3421 } else { 3422 use->dump(); 3423 assert(false, "should not be here"); 3424 #endif 3425 } 3426 } 3427 } 3428 3429 // 3430 // Search memory chain of "mem" to find a MemNode whose address 3431 // is the specified alias index. 
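// The returned node is a MemNode on the slice, a memory Phi, or a
// sentinel such as the memory projection of the Initialize/Allocate
// that created the instance.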
3432 //
3433 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
3434 if (orig_mem == nullptr) {
3435 return orig_mem;
3436 }
3437 Compile* C = _compile;
3438 PhaseGVN* igvn = _igvn;
3439 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
3440 bool is_instance = (toop != nullptr) && toop->is_known_instance();
3441 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
3442 Node *prev = nullptr;
3443 Node *result = orig_mem;
3444 while (prev != result) {
3445 prev = result;
3446 if (result == start_mem) {
3447 break; // hit one of our sentinels
3448 }
3449 if (result->is_Mem()) {
3450 const Type *at = igvn->type(result->in(MemNode::Address));
3451 if (at == Type::TOP) {
3452 break; // Dead
3453 }
3454 assert (at->isa_ptr() != nullptr, "pointer type required.");
3455 int idx = C->get_alias_index(at->is_ptr());
3456 if (idx == alias_idx) {
3457 break; // Found
3458 }
3459 if (!is_instance && (at->isa_oopptr() == nullptr ||
3460 !at->is_oopptr()->is_known_instance())) {
3461 break; // Do not skip store to general memory slice.
3462 }
3463 result = result->in(MemNode::Memory);
3464 }
3465 if (!is_instance) {
3466 continue; // don't search further for non-instance types
3467 }
3468 // skip over a call which does not affect this memory slice
3469 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
3470 Node *proj_in = result->in(0);
3471 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
3472 break; // hit one of our sentinels
3473 } else if (proj_in->is_Call()) {
3474 // ArrayCopy node processed here as well
3475 CallNode *call = proj_in->as_Call();
3476 if (!call->may_modify(toop, igvn)) {
3477 result = call->in(TypeFunc::Memory);
3478 }
3479 } else if (proj_in->is_Initialize()) {
3480 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
3481 // Stop if this is the initialization for the object instance which
3482 // contains this memory slice, otherwise skip over it.
3483 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
3484 result = proj_in->in(TypeFunc::Memory);
3485 }
3486 } else if (proj_in->is_MemBar()) {
3487 // Check if there is an array copy for a clone
3488 // Step over GC barrier when ReduceInitialCardMarks is disabled
3489 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3490 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
3491
3492 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
3493 // Stop if it is a clone
3494 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
3495 if (ac->may_modify(toop, igvn)) {
3496 break;
3497 }
3498 }
3499 result = proj_in->in(TypeFunc::Memory);
3500 }
3501 } else if (result->is_MergeMem()) {
3502 MergeMemNode *mmem = result->as_MergeMem();
3503 result = step_through_mergemem(mmem, alias_idx, toop);
3504 if (result == mmem->base_memory()) {
3505 // Didn't find instance memory, search through general slice recursively.
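// Cache the result of the recursive search on the MergeMem so the
// same chain is not walked again for this slice.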
3506 result = mmem->memory_at(C->get_general_index(alias_idx));
3507 result = find_inst_mem(result, alias_idx, orig_phis);
3508 if (C->failing()) {
3509 return nullptr;
3510 }
3511 mmem->set_memory_at(alias_idx, result);
3512 }
3513 } else if (result->is_Phi() &&
3514 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
3515 Node *un = result->as_Phi()->unique_input(igvn);
3516 if (un != nullptr) {
3517 orig_phis.append_if_missing(result->as_Phi());
3518 result = un;
3519 } else {
3520 break;
3521 }
3522 } else if (result->is_ClearArray()) {
3523 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
3524 // Can not bypass initialization of the instance
3525 // we are looking for.
3526 break;
3527 }
3528 // Otherwise skip it (the call updated 'result' value).
3529 } else if (result->Opcode() == Op_SCMemProj) {
3530 Node* mem = result->in(0);
3531 Node* adr = nullptr;
3532 if (mem->is_LoadStore()) {
3533 adr = mem->in(MemNode::Address);
3534 } else {
3535 assert(mem->Opcode() == Op_EncodeISOArray ||
3536 mem->Opcode() == Op_StrCompressedCopy, "sanity");
3537 adr = mem->in(3); // Memory edge corresponds to destination array
3538 }
3539 const Type *at = igvn->type(adr);
3540 if (at != Type::TOP) {
3541 assert(at->isa_ptr() != nullptr, "pointer type required.");
3542 int idx = C->get_alias_index(at->is_ptr());
3543 if (idx == alias_idx) {
3544 // Assert in debug mode
3545 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
3546 break; // In product mode return SCMemProj node
3547 }
3548 }
3549 result = mem->in(MemNode::Memory);
3550 } else if (result->Opcode() == Op_StrInflatedCopy) {
3551 Node* adr = result->in(3); // Memory edge corresponds to destination array
3552 const Type *at = igvn->type(adr);
3553 if (at != Type::TOP) {
3554 assert(at->isa_ptr() != nullptr, "pointer type required.");
3555 int idx = C->get_alias_index(at->is_ptr());
3556 if (idx == alias_idx) {
3557 // Assert in debug mode
3558 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
3559 break; // In product mode return SCMemProj node
3560 }
3561 }
3562 result = result->in(MemNode::Memory);
3563 }
3564 }
3565 if (result->is_Phi()) {
3566 PhiNode *mphi = result->as_Phi();
3567 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
3568 const TypePtr *t = mphi->adr_type();
3569 if (!is_instance) {
3570 // Push all non-instance Phis on the orig_phis worklist to update inputs
3571 // during Phase 4 if needed.
3572 orig_phis.append_if_missing(mphi);
3573 } else if (C->get_alias_index(t) != alias_idx) {
3574 // Create a new Phi with the specified alias index type.
3575 result = split_memory_phi(mphi, alias_idx, orig_phis);
3576 }
3577 }
3578 // The result is either a MemNode, a PhiNode, or an InitializeNode.
3579 return result;
3580 }
3581
3582 //
3583 // Convert the types of non-escaped objects to instance types where possible,
3584 // propagate the new type information through the graph, and update memory
3585 // edges and MergeMem inputs to reflect the new type.
3586 //
3587 // We start with allocations (and calls which may be allocations) on alloc_worklist.
3588 // The processing is done in 4 phases:
3589 //
3590 // Phase 1: Process possible allocations from alloc_worklist. Create instance
3591 // types for the CheckCastPP for allocations where possible.
3592 // Propagate the new types through users as follows:
3593 // casts and Phi: push users on alloc_worklist
3594 // AddP: cast Base and Address inputs to the instance type
3595 // push any AddP users on alloc_worklist and push any memnode
3596 // users onto memnode_worklist.
3597 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
3598 // search the Memory chain for a store with the appropriate address
3599 // type. If a Phi is found, create a new version with
3600 // the appropriate memory slices from each of the Phi inputs.
3601 // For stores, process the users as follows:
3602 // MemNode: push on memnode_worklist
3603 // MergeMem: push on mergemem_worklist
3604 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
3605 // moving the first node encountered of each instance type to the
3606 // input corresponding to its alias index, i.e. to the
3607 // appropriate memory slice.
3608 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3609 //
3610 // In the following example, the CheckCastPP nodes are the casts of allocation
3611 // results and the allocation of node 29 is non-escaped and eligible to be an
3612 // instance type.
3613 //
3614 // We start with:
3615 //
3616 // 7 Parm #memory
3617 // 10 ConI "12"
3618 // 19 CheckCastPP "Foo"
3619 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3620 // 29 CheckCastPP "Foo"
3621 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
3622 //
3623 // 40 StoreP 25 7 20 ... alias_index=4
3624 // 50 StoreP 35 40 30 ... alias_index=4
3625 // 60 StoreP 45 50 20 ... alias_index=4
3626 // 70 LoadP _ 60 30 ... alias_index=4
3627 // 80 Phi 75 50 60 Memory alias_index=4
3628 // 90 LoadP _ 80 30 ... alias_index=4
3629 // 100 LoadP _ 80 20 ... alias_index=4
3630 //
3631 //
3632 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
3633 // and creating a new alias index for node 30. This gives:
3634 //
3635 // 7 Parm #memory
3636 // 10 ConI "12"
3637 // 19 CheckCastPP "Foo"
3638 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3639 // 29 CheckCastPP "Foo" iid=24
3640 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
3641 //
3642 // 40 StoreP 25 7 20 ... alias_index=4
3643 // 50 StoreP 35 40 30 ... alias_index=6
3644 // 60 StoreP 45 50 20 ... alias_index=4
3645 // 70 LoadP _ 60 30 ... alias_index=6
3646 // 80 Phi 75 50 60 Memory alias_index=4
3647 // 90 LoadP _ 80 30 ... alias_index=6
3648 // 100 LoadP _ 80 20 ... alias_index=4
3649 //
3650 // In phase 2, new memory inputs are computed for the loads and stores,
3651 // and a new version of the phi is created. In phase 4, the inputs to
3652 // node 80 are updated and then the memory nodes are updated with the
3653 // values computed in phase 2. This results in:
3654 //
3655 // 7 Parm #memory
3656 // 10 ConI "12"
3657 // 19 CheckCastPP "Foo"
3658 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3659 // 29 CheckCastPP "Foo" iid=24
3660 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
3661 //
3662 // 40 StoreP 25 7 20 ... alias_index=4
3663 // 50 StoreP 35 7 30 ... alias_index=6
3664 // 60 StoreP 45 40 20 ... alias_index=4
3665 // 70 LoadP _ 50 30 ... alias_index=6
3666 // 80 Phi 75 40 60 Memory alias_index=4
3667 // 120 Phi 75 50 50 Memory alias_index=6
3668 // 90 LoadP _ 120 30 ... alias_index=6
3669 // 100 LoadP _ 80 20 ... alias_index=4
3670 //
3671 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
3672 GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
3673 GrowableArray<MergeMemNode*> &mergemem_worklist,
3674 Unique_Node_List &reducible_merges) {
3675 GrowableArray<Node *> memnode_worklist;
3676 GrowableArray<PhiNode *> orig_phis;
3677 PhaseIterGVN *igvn = _igvn;
3678 uint new_index_start = (uint) _compile->num_alias_types();
3679 VectorSet visited;
3680 ideal_nodes.clear(); // Reset for use with set_map/get_map.
3681 uint unique_old = _compile->unique();
3682
3683 // Phase 1: Process possible allocations from alloc_worklist.
3684 // Create instance types for the CheckCastPP for allocations where possible.
3685 //
3686 // (Note: don't forget to change the order of the second AddP node on
3687 // the alloc_worklist if the order of the worklist processing is changed,
3688 // see the comment in find_second_addp().)
3689 //
3690 while (alloc_worklist.length() != 0) {
3691 Node *n = alloc_worklist.pop();
3692 uint ni = n->_idx;
3693 if (n->is_Call()) {
3694 CallNode *alloc = n->as_Call();
3695 // copy escape information to call node
3696 PointsToNode* ptn = ptnode_adr(alloc->_idx);
3697 PointsToNode::EscapeState es = ptn->escape_state();
3698 // We have an allocation or call which returns a Java object,
3699 // see if it is non-escaped.
3700 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
3701 continue;
3702 }
3703 // Find CheckCastPP for the allocate or for the return value of a call
3704 n = alloc->result_cast();
3705 if (n == nullptr) { // No uses except Initialize node
3706 if (alloc->is_Allocate()) {
3707 // Set the scalar_replaceable flag for allocation
3708 // so it could be eliminated if it has no uses.
3709 alloc->as_Allocate()->_is_scalar_replaceable = true;
3710 }
3711 continue;
3712 }
3713 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
3714 // We can reach here in the allocate case if one init is associated with many allocs.
3715 if (alloc->is_Allocate()) {
3716 alloc->as_Allocate()->_is_scalar_replaceable = false;
3717 }
3718 continue;
3719 }
3720
3721 // The inline code for Object.clone() casts the allocation result to
3722 // java.lang.Object and then to the actual type of the allocated
3723 // object. Detect this case and use the second cast.
3724 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
3725 // the allocation result is cast to java.lang.Object and then
3726 // to the actual Array type.
3727 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3728 && (alloc->is_AllocateArray() ||
3729 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
3730 Node *cast2 = nullptr;
3731 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3732 Node *use = n->fast_out(i);
3733 if (use->is_CheckCastPP()) {
3734 cast2 = use;
3735 break;
3736 }
3737 }
3738 if (cast2 != nullptr) {
3739 n = cast2;
3740 } else {
3741 // Not scalar replaceable if the allocation type is statically unknown
3742 // (reflection allocation): the object can't be restored during
3743 // deoptimization without a precise type.
3744 continue;
3745 }
3746 }
3747
3748 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3749 if (t == nullptr) {
3750 continue; // not a TypeOopPtr
3751 }
3752 if (!t->klass_is_exact()) {
3753 continue; // not a unique type
3754 }
3755 if (alloc->is_Allocate()) {
3756 // Set the scalar_replaceable flag for allocation
3757 // so it could be eliminated.
3758 alloc->as_Allocate()->_is_scalar_replaceable = true;
3759 }
3760 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
3761 // In order for an object to be scalar-replaceable, it must be:
3762 // - a direct allocation (not a call returning an object)
3763 // - non-escaping
3764 // - eligible to be a unique type
3765 // - not determined to be ineligible by escape analysis
3766 set_map(alloc, n);
3767 set_map(n, alloc);
3768 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3769 igvn->hash_delete(n);
3770 igvn->set_type(n, tinst);
3771 n->raise_bottom_type(tinst);
3772 igvn->hash_insert(n);
3773 record_for_optimizer(n);
3774 // Allocate an alias index for the header fields. Accesses to
3775 // the header emitted during macro expansion wouldn't have
3776 // correct memory state otherwise.
3777 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3778 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3779 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3780
3781 // First, put on the worklist all Field edges from the Connection Graph,
3782 // which is more accurate than putting the immediate users from the Ideal Graph.
3783 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3784 PointsToNode* tgt = e.get();
3785 if (tgt->is_Arraycopy()) {
3786 continue;
3787 }
3788 Node* use = tgt->ideal_node();
3789 assert(tgt->is_Field() && use->is_AddP(),
3790 "only AddP nodes are Field edges in CG");
3791 if (use->outcnt() > 0) { // Don't process dead nodes
3792 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3793 if (addp2 != nullptr) {
3794 assert(alloc->is_AllocateArray(),"array allocation was expected");
3795 alloc_worklist.append_if_missing(addp2);
3796 }
3797 alloc_worklist.append_if_missing(use);
3798 }
3799 }
3800
3801 // An allocation may have an Initialize which has raw stores. Scan
3802 // the users of the raw allocation result and push AddP users
3803 // on alloc_worklist.
3804 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3805 assert (raw_result != nullptr, "must have an allocation result");
3806 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3807 Node *use = raw_result->fast_out(i);
3808 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3809 Node* addp2 = find_second_addp(use, raw_result);
3810 if (addp2 != nullptr) {
3811 assert(alloc->is_AllocateArray(),"array allocation was expected");
3812 alloc_worklist.append_if_missing(addp2);
3813 }
3814 alloc_worklist.append_if_missing(use);
3815 } else if (use->is_MemBar()) {
3816 memnode_worklist.append_if_missing(use);
3817 }
3818 }
3819 }
3820 } else if (n->is_AddP()) {
3821 Node* addp_base = get_addp_base(n);
3822 if (addp_base != nullptr && reducible_merges.member(addp_base)) {
3823 // This AddP will go away when we reduce the Phi
3824 continue;
3825 }
3826 JavaObjectNode* jobj = unique_java_object(addp_base);
3827 if (jobj == nullptr || jobj == phantom_obj) {
3828 #ifdef ASSERT
3829 ptnode_adr(get_addp_base(n)->_idx)->dump();
3830 ptnode_adr(n->_idx)->dump();
3831 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3832 #endif
3833 _compile->record_failure(_invocation > 0 ?
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3834 return; 3835 } 3836 Node *base = get_map(jobj->idx()); // CheckCastPP node 3837 if (!split_AddP(n, base)) continue; // wrong type from dead path 3838 } else if (n->is_Phi() || 3839 n->is_CheckCastPP() || 3840 n->is_EncodeP() || 3841 n->is_DecodeN() || 3842 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 3843 if (visited.test_set(n->_idx)) { 3844 assert(n->is_Phi(), "loops only through Phi's"); 3845 continue; // already processed 3846 } 3847 // Reducible Phi's will be removed from the graph after split_unique_types finishes 3848 if (reducible_merges.member(n)) { 3849 // Split loads through phi 3850 reduce_phi_on_field_access(n->as_Phi(), alloc_worklist); 3851 continue; 3852 } 3853 JavaObjectNode* jobj = unique_java_object(n); 3854 if (jobj == nullptr || jobj == phantom_obj) { 3855 #ifdef ASSERT 3856 ptnode_adr(n->_idx)->dump(); 3857 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); 3858 #endif 3859 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3860 return; 3861 } else { 3862 Node *val = get_map(jobj->idx()); // CheckCastPP node 3863 TypeNode *tn = n->as_Type(); 3864 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3865 assert(tinst != nullptr && tinst->is_known_instance() && 3866 tinst->instance_id() == jobj->idx() , "instance type expected."); 3867 3868 const Type *tn_type = igvn->type(tn); 3869 const TypeOopPtr *tn_t; 3870 if (tn_type->isa_narrowoop()) { 3871 tn_t = tn_type->make_ptr()->isa_oopptr(); 3872 } else { 3873 tn_t = tn_type->isa_oopptr(); 3874 } 3875 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 3876 if (tn_t->isa_aryptr()) { 3877 // Keep array properties (not flat/null-free) 3878 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr()); 3879 if (tinst == nullptr) { 3880 continue; // Skip dead path with inconsistent properties 3881 } 3882 } 3883 if (tn_type->isa_narrowoop()) { 3884 tn_type = tinst->make_narrowoop(); 3885 } else { 3886 tn_type = tinst; 3887 } 3888 igvn->hash_delete(tn); 3889 igvn->set_type(tn, tn_type); 3890 tn->set_type(tn_type); 3891 igvn->hash_insert(tn); 3892 record_for_optimizer(n); 3893 } else { 3894 assert(tn_type == TypePtr::NULL_PTR || 3895 tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t), 3896 "unexpected type"); 3897 continue; // Skip dead path with different type 3898 } 3899 } 3900 } else { 3901 debug_only(n->dump();) 3902 assert(false, "EA: unexpected node"); 3903 continue; 3904 } 3905 // push allocation's users on appropriate worklist 3906 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3907 Node *use = n->fast_out(i); 3908 if (use->is_Mem() && use->in(MemNode::Address) == n) { 3909 // Load/store to instance's field 3910 memnode_worklist.append_if_missing(use); 3911 } else if (use->is_MemBar()) { 3912 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3913 memnode_worklist.append_if_missing(use); 3914 } 3915 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3916 Node* addp2 = find_second_addp(use, n); 3917 if (addp2 != nullptr) { 3918 alloc_worklist.append_if_missing(addp2); 3919 } 3920 alloc_worklist.append_if_missing(use); 3921 } else if (use->is_Phi() || 3922 use->is_CheckCastPP() || 3923 use->is_EncodeNarrowPtr() || 3924 use->is_DecodeNarrowPtr() || 3925 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3926 
alloc_worklist.append_if_missing(use);
3927 #ifdef ASSERT
3928 } else if (use->is_Mem()) {
3929 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
3930 } else if (use->is_MergeMem()) {
3931 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3932 } else if (use->is_SafePoint()) {
3933 // Look for MergeMem nodes for calls which reference a unique allocation
3934 // (through CheckCastPP nodes) even for debug info.
3935 Node* m = use->in(TypeFunc::Memory);
3936 if (m->is_MergeMem()) {
3937 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3938 }
3939 } else if (use->Opcode() == Op_EncodeISOArray) {
3940 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3941 // EncodeISOArray overwrites destination array
3942 memnode_worklist.append_if_missing(use);
3943 }
3944 } else if (use->Opcode() == Op_Return) {
3945 // Allocation is referenced by a field of the returned inline type
3946 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
3947 } else {
3948 uint op = use->Opcode();
3949 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3950 (use->in(MemNode::Memory) == n)) {
3951 // They overwrite the memory edge corresponding to the destination array.
3952 memnode_worklist.append_if_missing(use);
3953 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3954 op == Op_CastP2X || op == Op_StoreCM ||
3955 op == Op_FastLock || op == Op_AryEq ||
3956 op == Op_StrComp || op == Op_CountPositives ||
3957 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3958 op == Op_StrEquals || op == Op_VectorizedHashCode ||
3959 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3960 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
3961 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3962 n->dump();
3963 use->dump();
3964 assert(false, "EA: missing allocation reference path");
3965 }
3966 #endif
3967 }
3968 }
3969
3970 }
3971
3972 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3973 // type, record it in the ArrayCopy node so we know what memory this
3974 // node uses/modifies.
3975 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3976 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3977 Node* dest = ac->in(ArrayCopyNode::Dest);
3978 if (dest->is_AddP()) {
3979 dest = get_addp_base(dest);
3980 }
3981 JavaObjectNode* jobj = unique_java_object(dest);
3982 if (jobj != nullptr) {
3983 Node *base = get_map(jobj->idx());
3984 if (base != nullptr) {
3985 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3986 ac->_dest_type = base_t;
3987 }
3988 }
3989 Node* src = ac->in(ArrayCopyNode::Src);
3990 if (src->is_AddP()) {
3991 src = get_addp_base(src);
3992 }
3993 jobj = unique_java_object(src);
3994 if (jobj != nullptr) {
3995 Node* base = get_map(jobj->idx());
3996 if (base != nullptr) {
3997 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3998 ac->_src_type = base_t;
3999 }
4000 }
4001 }
4002
4003 // New alias types were created in split_AddP().
4004 uint new_index_end = (uint) _compile->num_alias_types();
4005
4006 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
4007 // compute new values for Memory inputs (the Memory inputs are not
4008 // actually updated until phase 4.)
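// If Phase 1 pushed no memory nodes, no loads or stores reference an
// instance type, so there are no memory slices to split and the
// remaining phases have nothing to do.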
4009 if (memnode_worklist.length() == 0)
4010 return; // nothing to do
4011 while (memnode_worklist.length() != 0) {
4012 Node *n = memnode_worklist.pop();
4013 if (visited.test_set(n->_idx)) {
4014 continue;
4015 }
4016 if (n->is_Phi() || n->is_ClearArray()) {
4017 // we don't need to do anything, but the users must be pushed
4018 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4019 // we don't need to do anything, but the users must be pushed
4020 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4021 if (n == nullptr) {
4022 continue;
4023 }
4024 } else if (n->Opcode() == Op_StrCompressedCopy ||
4025 n->Opcode() == Op_EncodeISOArray) {
4026 // get the memory projection
4027 n = n->find_out_with(Op_SCMemProj);
4028 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4029 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4030 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4031 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
4032 } else {
4033 assert(n->is_Mem(), "memory node required.");
4034 Node *addr = n->in(MemNode::Address);
4035 const Type *addr_t = igvn->type(addr);
4036 if (addr_t == Type::TOP) {
4037 continue;
4038 }
4039 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4040 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4041 assert ((uint)alias_idx < new_index_end, "wrong alias index");
4042 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4043 if (_compile->failing()) {
4044 return;
4045 }
4046 if (mem != n->in(MemNode::Memory)) {
4047 // We delay the memory edge update since we need the old one in
4048 // the MergeMem code below when instance memory slices are separated.
4049 set_map(n, mem);
4050 }
4051 if (n->is_Load()) {
4052 continue; // don't push users
4053 } else if (n->is_LoadStore()) {
4054 // get the memory projection
4055 n = n->find_out_with(Op_SCMemProj);
4056 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4057 }
4058 }
4059 // push user on appropriate worklist
4060 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4061 Node *use = n->fast_out(i);
4062 if (use->is_Phi() || use->is_ClearArray()) {
4063 memnode_worklist.append_if_missing(use);
4064 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4065 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
4066 continue;
4067 }
4068 memnode_worklist.append_if_missing(use);
4069 } else if (use->is_MemBar()) {
4070 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4071 memnode_worklist.append_if_missing(use);
4072 }
4073 #ifdef ASSERT
4074 } else if (use->is_Mem()) {
4075 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4076 } else if (use->is_MergeMem()) {
4077 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4078 } else if (use->Opcode() == Op_EncodeISOArray) {
4079 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4080 // EncodeISOArray overwrites destination array
4081 memnode_worklist.append_if_missing(use);
4082 }
4083 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4084 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4085 // store_unknown_inline overwrites destination array
4086 memnode_worklist.append_if_missing(use);
4087 } else {
4088 uint op = use->Opcode();
4089 if ((use->in(MemNode::Memory) == n) &&
4090 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4091 // They overwrite the memory edge corresponding to the destination array.
4092 memnode_worklist.append_if_missing(use);
4093 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4094 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4095 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4096 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4097 n->dump();
4098 use->dump();
4099 assert(false, "EA: missing memory path");
4100 }
4101 #endif
4102 }
4103 }
4104 }
4105
4106 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4107 // Walk each memory slice moving the first node encountered of each
4108 // instance type to the input corresponding to its alias index.
4109 uint length = mergemem_worklist.length();
4110 for (uint next = 0; next < length; ++next) {
4111 MergeMemNode* nmm = mergemem_worklist.at(next);
4112 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4113 // Note: we don't want to use MergeMemStream here because we only want to
4114 // scan inputs which exist at the start, not ones we add during processing.
4115 // Note 2: MergeMem may already contain instance memory slices added
4116 // during the find_inst_mem() call when memory nodes were processed above.
4117 igvn->hash_delete(nmm);
4118 uint nslices = MIN2(nmm->req(), new_index_start);
4119 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4120 Node* mem = nmm->in(i);
4121 Node* cur = nullptr;
4122 if (mem == nullptr || mem->is_top()) {
4123 continue;
4124 }
4125 // First, update mergemem by moving memory nodes to corresponding slices
4126 // if their type became more precise since this mergemem was created.
4127 while (mem->is_Mem()) {
4128 const Type *at = igvn->type(mem->in(MemNode::Address));
4129 if (at != Type::TOP) {
4130 assert (at->isa_ptr() != nullptr, "pointer type required.");
4131 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4132 if (idx == i) {
4133 if (cur == nullptr) {
4134 cur = mem;
4135 }
4136 } else {
4137 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4138 nmm->set_memory_at(idx, mem);
4139 }
4140 }
4141 }
4142 mem = mem->in(MemNode::Memory);
4143 }
4144 nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4145 // Find any instance of the current type if we haven't already
4146 // encountered a memory slice of the instance along the memory chain.
4147 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4148 if ((uint)_compile->get_general_index(ni) == i) {
4149 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4150 if (nmm->is_empty_memory(m)) {
4151 Node* result = find_inst_mem(mem, ni, orig_phis);
4152 if (_compile->failing()) {
4153 return;
4154 }
4155 nmm->set_memory_at(ni, result);
4156 }
4157 }
4158 }
4159 }
4160 // Find the rest of the instance values
4161 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4162 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4163 Node* result = step_through_mergemem(nmm, ni, tinst);
4164 if (result == nmm->base_memory()) {
4165 // Didn't find instance memory, search through general slice recursively.
4166 result = nmm->memory_at(_compile->get_general_index(ni)); 4167 result = find_inst_mem(result, ni, orig_phis); 4168 if (_compile->failing()) { 4169 return; 4170 } 4171 nmm->set_memory_at(ni, result); 4172 } 4173 } 4174 igvn->hash_insert(nmm); 4175 record_for_optimizer(nmm); 4176 } 4177 4178 // Phase 4: Update the inputs of non-instance memory Phis and 4179 // the Memory input of memnodes 4180 // First update the inputs of any non-instance Phi's from 4181 // which we split out an instance Phi. Note we don't have 4182 // to recursively process Phi's encountered on the input memory 4183 // chains as is done in split_memory_phi() since they will 4184 // also be processed here. 4185 for (int j = 0; j < orig_phis.length(); j++) { 4186 PhiNode *phi = orig_phis.at(j); 4187 int alias_idx = _compile->get_alias_index(phi->adr_type()); 4188 igvn->hash_delete(phi); 4189 for (uint i = 1; i < phi->req(); i++) { 4190 Node *mem = phi->in(i); 4191 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 4192 if (_compile->failing()) { 4193 return; 4194 } 4195 if (mem != new_mem) { 4196 phi->set_req(i, new_mem); 4197 } 4198 } 4199 igvn->hash_insert(phi); 4200 record_for_optimizer(phi); 4201 } 4202 4203 // Update the memory inputs of MemNodes with the value we computed 4204 // in Phase 2 and move stores memory users to corresponding memory slices. 4205 // Disable memory split verification code until the fix for 6984348. 4206 // Currently it produces false negative results since it does not cover all cases. 4207 #if 0 // ifdef ASSERT 4208 visited.Reset(); 4209 Node_Stack old_mems(arena, _compile->unique() >> 2); 4210 #endif 4211 for (uint i = 0; i < ideal_nodes.size(); i++) { 4212 Node* n = ideal_nodes.at(i); 4213 Node* nmem = get_map(n->_idx); 4214 assert(nmem != nullptr, "sanity"); 4215 if (n->is_Mem()) { 4216 #if 0 // ifdef ASSERT 4217 Node* old_mem = n->in(MemNode::Memory); 4218 if (!visited.test_set(old_mem->_idx)) { 4219 old_mems.push(old_mem, old_mem->outcnt()); 4220 } 4221 #endif 4222 assert(n->in(MemNode::Memory) != nmem, "sanity"); 4223 if (!n->is_Load()) { 4224 // Move memory users of a store first. 
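// (Users are moved while the store still has its old memory input;
// the input itself is rewired just below.)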
4225 move_inst_mem(n, orig_phis); 4226 } 4227 // Now update memory input 4228 igvn->hash_delete(n); 4229 n->set_req(MemNode::Memory, nmem); 4230 igvn->hash_insert(n); 4231 record_for_optimizer(n); 4232 } else { 4233 assert(n->is_Allocate() || n->is_CheckCastPP() || 4234 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 4235 } 4236 } 4237 #if 0 // ifdef ASSERT 4238 // Verify that memory was split correctly 4239 while (old_mems.is_nonempty()) { 4240 Node* old_mem = old_mems.node(); 4241 uint old_cnt = old_mems.index(); 4242 old_mems.pop(); 4243 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 4244 } 4245 #endif 4246 } 4247 4248 #ifndef PRODUCT 4249 int ConnectionGraph::_no_escape_counter = 0; 4250 int ConnectionGraph::_arg_escape_counter = 0; 4251 int ConnectionGraph::_global_escape_counter = 0; 4252 4253 static const char *node_type_names[] = { 4254 "UnknownType", 4255 "JavaObject", 4256 "LocalVar", 4257 "Field", 4258 "Arraycopy" 4259 }; 4260 4261 static const char *esc_names[] = { 4262 "UnknownEscape", 4263 "NoEscape", 4264 "ArgEscape", 4265 "GlobalEscape" 4266 }; 4267 4268 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 4269 NodeType nt = node_type(); 4270 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 4271 if (print_state) { 4272 EscapeState es = escape_state(); 4273 EscapeState fields_es = fields_escape_state(); 4274 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 4275 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 4276 out->print("NSR "); 4277 } 4278 } 4279 } 4280 4281 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 4282 dump_header(print_state, out); 4283 if (is_Field()) { 4284 FieldNode* f = (FieldNode*)this; 4285 if (f->is_oop()) { 4286 out->print("oop "); 4287 } 4288 if (f->offset() > 0) { 4289 out->print("+%d ", f->offset()); 4290 } 4291 out->print("("); 4292 for (BaseIterator i(f); i.has_next(); i.next()) { 4293 PointsToNode* b = i.get(); 4294 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 4295 } 4296 out->print(" )"); 4297 } 4298 out->print("["); 4299 for (EdgeIterator i(this); i.has_next(); i.next()) { 4300 PointsToNode* e = i.get(); 4301 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 4302 } 4303 out->print(" ["); 4304 for (UseIterator i(this); i.has_next(); i.next()) { 4305 PointsToNode* u = i.get(); 4306 bool is_base = false; 4307 if (PointsToNode::is_base_use(u)) { 4308 is_base = true; 4309 u = PointsToNode::get_use_node(u)->as_Field(); 4310 } 4311 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 4312 } 4313 out->print(" ]] "); 4314 if (_node == nullptr) { 4315 out->print("<null>%s", newline ? "\n" : ""); 4316 } else { 4317 _node->dump(newline ? 
"\n" : "", false, out); 4318 } 4319 } 4320 4321 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 4322 bool first = true; 4323 int ptnodes_length = ptnodes_worklist.length(); 4324 for (int i = 0; i < ptnodes_length; i++) { 4325 PointsToNode *ptn = ptnodes_worklist.at(i); 4326 if (ptn == nullptr || !ptn->is_JavaObject()) { 4327 continue; 4328 } 4329 PointsToNode::EscapeState es = ptn->escape_state(); 4330 if ((es != PointsToNode::NoEscape) && !Verbose) { 4331 continue; 4332 } 4333 Node* n = ptn->ideal_node(); 4334 if (n->is_Allocate() || (n->is_CallStaticJava() && 4335 n->as_CallStaticJava()->is_boxing_method())) { 4336 if (first) { 4337 tty->cr(); 4338 tty->print("======== Connection graph for "); 4339 _compile->method()->print_short_name(); 4340 tty->cr(); 4341 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 4342 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 4343 tty->cr(); 4344 first = false; 4345 } 4346 ptn->dump(); 4347 // Print all locals and fields which reference this allocation 4348 for (UseIterator j(ptn); j.has_next(); j.next()) { 4349 PointsToNode* use = j.get(); 4350 if (use->is_LocalVar()) { 4351 use->dump(Verbose); 4352 } else if (Verbose) { 4353 use->dump(); 4354 } 4355 } 4356 tty->cr(); 4357 } 4358 } 4359 } 4360 4361 void ConnectionGraph::print_statistics() { 4362 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 4363 } 4364 4365 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 4366 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 4367 return; 4368 } 4369 for (int next = 0; next < java_objects_worklist.length(); ++next) { 4370 JavaObjectNode* ptn = java_objects_worklist.at(next); 4371 if (ptn->ideal_node()->is_Allocate()) { 4372 if (ptn->escape_state() == PointsToNode::NoEscape) { 4373 Atomic::inc(&ConnectionGraph::_no_escape_counter); 4374 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 4375 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 4376 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 4377 Atomic::inc(&ConnectionGraph::_global_escape_counter); 4378 } else { 4379 assert(false, "Unexpected Escape State"); 4380 } 4381 } 4382 } 4383 } 4384 4385 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 4386 if (_compile->directive()->TraceEscapeAnalysisOption) { 4387 assert(ptn != nullptr, "should not be null"); 4388 assert(reason != nullptr, "should not be null"); 4389 ptn->dump_header(true); 4390 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 4391 PointsToNode::EscapeState new_fields_es = fields ? 
es : ptn->fields_escape_state(); 4392 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 4393 } 4394 } 4395 4396 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 4397 if (_compile->directive()->TraceEscapeAnalysisOption) { 4398 stringStream ss; 4399 ss.print("propagated from: "); 4400 from->dump(true, &ss, false); 4401 return ss.as_string(); 4402 } else { 4403 return nullptr; 4404 } 4405 } 4406 4407 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 4408 if (_compile->directive()->TraceEscapeAnalysisOption) { 4409 stringStream ss; 4410 ss.print("escapes as arg to:"); 4411 call->dump("", false, &ss); 4412 return ss.as_string(); 4413 } else { 4414 return nullptr; 4415 } 4416 } 4417 4418 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 4419 if (_compile->directive()->TraceEscapeAnalysisOption) { 4420 stringStream ss; 4421 ss.print("is merged with other object: "); 4422 other->dump_header(true, &ss); 4423 return ss.as_string(); 4424 } else { 4425 return nullptr; 4426 } 4427 } 4428 4429 #endif 4430 4431 void ConnectionGraph::record_for_optimizer(Node *n) { 4432 _igvn->_worklist.push(n); 4433 _igvn->add_users_to_worklist(n); 4434 }