/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), ReduceAllocationMerges ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
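  // (Illustrative note, not tied to a particular benchmark: a method containing
  // "Point p = new Point(x, y); return p.x + p.y;" produces an Allocate macro
  // node and is therefore a candidate, while a method with no allocations,
  // no locks on non-trivial objects, and no boxing calls is skipped entirely.)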
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
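          // (Their escape state may still be raised during propagation; step 3
          // below re-checks each entry of non_escaped_allocs_worklist.)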
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of their inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used during Call node
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
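  //    (complete_connection_graph() iterates the propagation to a fixed point
  //    and gives up, returning false, when all objects escape or when the
  //    time/iteration budget tracked by _build_iterations/_build_time is spent.)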
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
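  //    ("Separating" here means giving each scalar replaceable allocation its
  //    own instance type so that its memory slices become disjoint from the
  //    rest of the memory graph; see split_unique_types() below.)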
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Remove reducible allocation merges from ideal graph
  if (ReduceAllocationMerges && reducible_merges.size() > 0) {
    bool delay = _igvn->delay_transform();
    _igvn->set_delay_transform(true);
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      reduce_phi(n->as_Phi());
      if (C->failing()) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        return false;
      }
    }
    _igvn->set_delay_transform(delay);
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge and
// no input to the Phi is nullable.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  // Check if there is a scalar replaceable allocate in the Phi
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    // Right now we can't restore a "null" pointer during deoptimization
    const Type* inp_t = _igvn->type(ophi->in(i));
    if (inp_t == nullptr || inp_t->make_oopptr() == nullptr || inp_t->make_oopptr()->maybe_null()) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Input %d is nullable.", ophi->_idx, _invocation, i);)
      return false;
    }

    // We are looking for at least one SR object in the merge
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      assert(ptn->ideal_node() != nullptr && ptn->ideal_node()->is_Allocate(), "sanity");
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// Check if we are able to untangle the merge. Right now we only reduce Phis
// which are only used as debug information.
bool ConnectionGraph::can_reduce_phi_check_users(PhiNode* ophi) const {
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(ophi)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", ophi->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", ophi->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", ophi->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_phi_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying
  // with RAM disabled.
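  // ("RAM" stands for ReduceAllocationMerges; a failed attempt is retried
  // via C2Compiler::retry_no_reduce_allocation_merges().)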
  if (!_compile->do_reduce_allocation_merges()) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr || phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_instptr() == nullptr ||
      !phi_t->make_ptr()->isa_instptr()->klass_is_exact()) {
    NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can NOT reduce Phi %d during invocation %d because it's nullable.", ophi->_idx, _invocation); })
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_phi_check_users(ophi)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

void ConnectionGraph::reduce_phi_on_field_access(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // Iterate over Phi outputs looking for an AddP
  for (int j = ophi->outcnt()-1; j >= 0;) {
    Node* previous_addp = ophi->raw_out(j);
    uint num_edges = 1;
    if (previous_addp->is_AddP()) {
      // All AddPs are present in the connection graph
      FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
      num_edges = previous_addp->in(AddPNode::Address) == previous_addp->in(AddPNode::Base) ? 2 : 1;

      // Iterate over AddP looking for a Load
      for (int k = previous_addp->outcnt()-1; k >= 0;) {
        Node* previous_load = previous_addp->raw_out(k);
        if (previous_load->is_Load()) {
          Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);
          assert(data_phi != nullptr, "Output of split_through_phi is null.");
          assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
          _igvn->replace_node(previous_load, data_phi);

          // Push the newly created AddP on alloc_worklist and patch
          // the connection graph. Note that the changes in the CG below
          // won't affect the ES of objects since the new nodes have the
          // same status as the old ones.
          if (data_phi != nullptr && data_phi->is_Phi()) {
            for (uint i = 1; i < data_phi->req(); i++) {
              Node* new_load = data_phi->in(i);
              if (new_load->is_Load()) {
                Node* new_addp = new_load->in(MemNode::Address);
                Node* base = get_addp_base(new_addp);

                // The base might not be something that we can create a unique
                // type for. If that's the case we are done with that input.
                PointsToNode* jobj_ptn = unique_java_object(base);
                if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
                  continue;
                }

                // Push to alloc_worklist since the base has a unique type
                alloc_worklist.append_if_missing(new_addp);

                // Now let's add the node to the connection graph
                _nodes.at_grow(new_addp->_idx, nullptr);
                add_field(new_addp, fn->escape_state(), fn->offset());
                add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

                // If the load doesn't load an object then it won't be
                // part of the connection graph
                PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
                if (curr_load_ptn != nullptr) {
                  _nodes.at_grow(new_load->_idx, nullptr);
                  add_local_var(new_load, curr_load_ptn->escape_state());
                  add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
                }
              }
            }
          }
        }
        --k;
        k = MIN2(k, (int)previous_addp->outcnt()-1);
      }

      // Remove the old AddP from the processing list because it's dead now
      alloc_worklist.remove_if_existing(previous_addp);
    }
    j -= num_edges;
    j = MIN2(j, (int)ophi->outcnt()-1);
  }
}

// This method will create a SafePointScalarObjectNode for each combination of
// scalar replaceable allocation in 'ophi' and SafePoint node in 'safepoints'.
// The method will create a SafePointScalarMergeNode for each combination of
// 'ophi' and SafePoint node in 'safepoints'.
// Each SafePointScalarMergeNode created here may describe multiple scalar
// replaced objects - check detailed description in SafePointScalarMergeNode
// class header.
//
// This method will set entries in the Phi that are scalar replaceable to 'null'.
void ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi, Unique_Node_List* safepoints) {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  uint number_of_sr_objects = 0;
  PhaseMacroExpand mexp(*_igvn);

  _igvn->hash_delete(ophi);

  // Fill in the 'selector' Phi. If index 'i' of the selector is:
  // -> a '-1' constant, the i'th input of the original Phi is NSR.
  // -> a 'x' constant >= 0, the i'th input of the original Phi will be SR and
  //    the info about the scalarized object will be at index x of
  //    ObjectMergeValue::possible_objects
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  // Update the debug information of all safepoints in turn
  for (uint spi = 0; spi < safepoints->size(); spi++) {
    SafePointNode* sfpt = safepoints->at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(ophi);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "ophi" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(ophi, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), ophi);
    _igvn->_worklist.push(sfpt);
  }

  // Now we can change ophi since we don't need to know the types
  // of the input allocations anymore.
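  // (The replacement Phi keeps the NSR inputs and nulls out the SR ones, so
  // its type must admit null; hence the meet with TypePtr::NULL_PTR below.)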
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  _igvn->replace_node(ophi, new_phi);
  _igvn->hash_insert(ophi);
  _igvn->_worklist.push(ophi);
}

void ConnectionGraph::reduce_phi(PhiNode* ophi) {
  Unique_Node_List safepoints;

  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);

    // All SafePoint nodes using the same Phi node use the same debug
    // information (regarding the Phi). Furthermore, reducing the Phi used by a
    // SafePoint requires changing the Phi. Therefore, we collect all safepoints
    // and patch them all at once later.
    if (use->is_SafePoint()) {
      safepoints.push(use->as_SafePoint());
    } else {
      assert(false, "Unexpected use of reducible Phi.");
    }
  }

  if (safepoints.size() > 0) {
    reduce_phi_on_safepoints(ophi, &safepoints);
  }
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  Unique_Node_List ideal_nodes;

  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}


// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
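      // (Locks and Unlocks on objects that turn out not to escape can then
      // be elided by the optimizer; see also has_candidates() above.)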
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      PointsToNode* ptn_con = add_java_object(n, es);
      set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != nullptr) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_VectorizedHashCode:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
      set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during first iteration because some could be
      // not defined yet, defer to final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != nullptr, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == nullptr) {
          continue; // ignore null
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != nullptr, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
             n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
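      // (A returned oop becomes reachable by the caller, so everything it
      // points to must be treated as escaping the method.)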
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_VectorizedHashCode:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != nullptr, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != nullptr, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == nullptr) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert (adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == nullptr) {
    n->dump(1);
    assert(adr_type != nullptr, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != nullptr, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != nullptr, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ?
          ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == nullptr) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
      if (es == PointsToNode::GlobalEscape) {
        set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
      }
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != nullptr &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == nullptr) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue;
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != nullptr, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
1530 // 1531 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 1532 arg_has_oops && (i > TypeFunc::Parms); 1533 #ifdef ASSERT 1534 if (!(is_arraycopy || 1535 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 1536 (call->as_CallLeaf()->_name != nullptr && 1537 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 1538 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 1539 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 1540 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 1541 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 1542 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 1543 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 1544 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 1545 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 1546 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 1547 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 || 1548 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 || 1549 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 1550 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 || 1551 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 1552 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 || 1553 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 || 1554 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 1555 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 1556 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 1557 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 1558 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 1559 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 1560 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 1561 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 || 1562 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 || 1563 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 1564 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 1565 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 1566 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 1567 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 1568 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 1569 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 1570 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 1571 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0) 1572 ))) { 1573 call->dump(); 1574 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 1575 } 1576 #endif 1577 // Always process arraycopy's destination object since 1578 // we need to add all possible edges to references in 1579 // source object. 
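// For example (an illustrative Java pattern, not taken from this file):
//
//   Object[] dst = new Object[n];          // dst itself does not escape
//   System.arraycopy(src, 0, dst, 0, n);
//
// Loads from dst's elements may yield objects that originate in src, so
// the destination must be processed even when its state is already ArgEscape.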
1580 if (arg_esc >= PointsToNode::ArgEscape &&
1581 !arg_is_arraycopy_dest) {
1582 continue;
1583 }
1584 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
1585 if (call->is_ArrayCopy()) {
1586 ArrayCopyNode* ac = call->as_ArrayCopy();
1587 if (ac->is_clonebasic() ||
1588 ac->is_arraycopy_validated() ||
1589 ac->is_copyof_validated() ||
1590 ac->is_copyofrange_validated()) {
1591 es = PointsToNode::NoEscape;
1592 }
1593 }
1594 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1595 if (arg_is_arraycopy_dest) {
1596 Node* src = call->in(TypeFunc::Parms);
1597 if (src->is_AddP()) {
1598 src = get_addp_base(src);
1599 }
1600 PointsToNode* src_ptn = ptnode_adr(src->_idx);
1601 assert(src_ptn != nullptr, "should be registered");
1602 if (arg_ptn != src_ptn) {
1603 // Special arraycopy edge:
1604 // A destination object's field can't have the source object
1605 // as base since the objects' escape states are not related.
1606 // Only escape state of destination object's fields affects
1607 // escape state of fields in source object.
1608 add_arraycopy(call, es, src_ptn, arg_ptn);
1609 }
1610 }
1611 }
1612 }
1613 break;
1614 }
1615 case Op_CallStaticJava: {
1616 // For a static call, we know exactly what method is being called.
1617 // Use bytecode estimator to record the call's escape effects
1618 #ifdef ASSERT
1619 const char* name = call->as_CallStaticJava()->_name;
1620 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
1621 #endif
1622 ciMethod* meth = call->as_CallJava()->method();
1623 if ((meth != nullptr) && meth->is_boxing_method()) {
1624 break; // Boxing methods do not modify any oops.
1625 }
1626 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
1627 // fall-through if not a Java method or no analyzer information
1628 if (call_analyzer != nullptr) {
1629 PointsToNode* call_ptn = ptnode_adr(call->_idx);
1630 const TypeTuple* d = call->tf()->domain();
1631 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1632 const Type* at = d->field_at(i);
1633 int k = i - TypeFunc::Parms;
1634 Node* arg = call->in(i);
1635 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
1636 if (at->isa_ptr() != nullptr &&
1637 call_analyzer->is_arg_returned(k)) {
1638 // The call returns arguments.
1639 if (call_ptn != nullptr) { // Is call's result used?
1640 assert(call_ptn->is_LocalVar(), "node should be registered");
1641 assert(arg_ptn != nullptr, "node should be registered");
1642 add_edge(call_ptn, arg_ptn);
1643 }
1644 }
1645 if (at->isa_oopptr() != nullptr &&
1646 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
1647 if (!call_analyzer->is_arg_stack(k)) {
1648 // The argument escapes globally
1649 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1650 } else {
1651 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1652 if (!call_analyzer->is_arg_local(k)) {
1653 // The argument itself doesn't escape, but any fields might
1654 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1655 }
1656 }
1657 }
1658 }
1659 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
1660 // The call returns arguments.
1661 assert(call_ptn->edge_count() > 0, "sanity");
1662 if (!call_analyzer->is_return_local()) {
1663 // Also returns unknown object.
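// For example (an illustrative sketch, not taken from this file):
//
//   static Object pick(Object a, boolean c) {
//     return c ? a : SomeHolder.CACHED;    // argument or non-local object
//   }
//
// The returned-argument edges were added above; the phantom_obj edge below
// accounts for the paths on which the return value is unknown.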
1664 add_edge(call_ptn, phantom_obj);
1665 }
1666 }
1667 break;
1668 }
1669 }
1670 default: {
1671 // Fall-through here if not a Java method or no analyzer information
1672 // or some other type of call, assume the worst case: all arguments
1673 // globally escape.
1674 const TypeTuple* d = call->tf()->domain();
1675 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1676 const Type* at = d->field_at(i);
1677 if (at->isa_oopptr() != nullptr) {
1678 Node* arg = call->in(i);
1679 if (arg->is_AddP()) {
1680 arg = get_addp_base(arg);
1681 }
1682 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
1683 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
1684 }
1685 }
1686 }
1687 }
1688 }
1689
1690
1691 // Finish Graph construction.
1692 bool ConnectionGraph::complete_connection_graph(
1693 GrowableArray<PointsToNode*>& ptnodes_worklist,
1694 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
1695 GrowableArray<JavaObjectNode*>& java_objects_worklist,
1696 GrowableArray<FieldNode*>& oop_fields_worklist) {
1697 // Normally only 1-3 passes needed to build Connection Graph depending
1698 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
1699 // Set limit to 20 to catch the situation when something went wrong and
1700 // bail out of Escape Analysis.
1701 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
1702 #define GRAPH_BUILD_ITER_LIMIT 20
1703
1704 // Propagate GlobalEscape and ArgEscape escape states and check that
1705 // we still have non-escaping objects. The method pushes on _worklist
1706 // Field nodes which reference phantom_object.
1707 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1708 return false; // Nothing to do.
1709 }
1710 // Now propagate references to all JavaObject nodes.
1711 int java_objects_length = java_objects_worklist.length();
1712 elapsedTimer build_time;
1713 build_time.start();
1714 elapsedTimer time;
1715 bool timeout = false;
1716 int new_edges = 1;
1717 int iterations = 0;
1718 do {
1719 while ((new_edges > 0) &&
1720 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
1721 double start_time = time.seconds();
1722 time.start();
1723 new_edges = 0;
1724 // Propagate references to phantom_object for nodes pushed on _worklist
1725 // by find_non_escaped_objects() and find_field_value().
1726 new_edges += add_java_object_edges(phantom_obj, false);
1727 for (int next = 0; next < java_objects_length; ++next) {
1728 JavaObjectNode* ptn = java_objects_worklist.at(next);
1729 new_edges += add_java_object_edges(ptn, true);
1730
1731 #define SAMPLE_SIZE 4
1732 if ((next % SAMPLE_SIZE) == 0) {
1733 // Every 4 iterations estimate how much time it will take
1734 // to complete graph construction.
1735 time.stop();
1736 // Poll for requests from shutdown mechanism to quiesce compiler
1737 // because Connection graph construction may take a long time.
1738 CompileBroker::maybe_block();
1739 double stop_time = time.seconds();
1740 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
1741 double time_until_end = time_per_iter * (double)(java_objects_length - next);
1742 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
1743 timeout = true;
1744 break; // Timeout
1745 }
1746 start_time = stop_time;
1747 time.start();
1748 }
1749 #undef SAMPLE_SIZE
1750
1751 }
1752 if (timeout) break;
1753 if (new_edges > 0) {
1754 // Update escape states on each iteration if graph was updated.
1755 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
1756 return false; // Nothing to do.
1757 }
1758 }
1759 time.stop();
1760 if (time.seconds() >= EscapeAnalysisTimeout) {
1761 timeout = true;
1762 break;
1763 }
1764 }
1765 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
1766 time.start();
1767 // Find fields which have unknown value.
1768 int fields_length = oop_fields_worklist.length();
1769 for (int next = 0; next < fields_length; next++) {
1770 FieldNode* field = oop_fields_worklist.at(next);
1771 if (field->edge_count() == 0) {
1772 new_edges += find_field_value(field);
1773 // This code may add new edges to phantom_object.
1774 // Need another cycle to propagate references to phantom_object.
1775 }
1776 }
1777 time.stop();
1778 if (time.seconds() >= EscapeAnalysisTimeout) {
1779 timeout = true;
1780 break;
1781 }
1782 } else {
1783 new_edges = 0; // Bailout
1784 }
1785 } while (new_edges > 0);
1786
1787 build_time.stop();
1788 _build_time = build_time.seconds();
1789 _build_iterations = iterations;
1790
1791 // Bailout if passed limits.
1792 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
1793 Compile* C = _compile;
1794 if (C->log() != nullptr) {
1795 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
1796 C->log()->text("%s", timeout ? "time" : "iterations");
1797 C->log()->end_elem(" limit'");
1798 }
1799 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
1800 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
1801 // Possible infinite build_connection_graph loop,
1802 // bailout (no changes to ideal graph were made).
1803 return false;
1804 }
1805
1806 #undef GRAPH_BUILD_ITER_LIMIT
1807
1808 // Find fields initialized by null for non-escaping Allocations.
1809 int non_escaped_length = non_escaped_allocs_worklist.length();
1810 for (int next = 0; next < non_escaped_length; next++) {
1811 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
1812 PointsToNode::EscapeState es = ptn->escape_state();
1813 assert(es <= PointsToNode::ArgEscape, "sanity");
1814 if (es == PointsToNode::NoEscape) {
1815 if (find_init_values_null(ptn, _igvn) > 0) {
1816 // Adding references to null object does not change escape states
1817 // since it does not escape. Also no fields are added to null object.
1818 add_java_object_edges(null_obj, false);
1819 }
1820 }
1821 Node* n = ptn->ideal_node();
1822 if (n->is_Allocate()) {
1823 // The object allocated by this Allocate node will never be
1824 // seen by another thread. Mark it so that when it is
1825 // expanded no MemBarStoreStore is added.
1826 InitializeNode* ini = n->as_Allocate()->initialization();
1827 if (ini != nullptr)
1828 ini->set_does_not_escape();
1829 }
1830 }
1831 return true; // Finished graph construction.
1832 }
1833
1834 // Propagate GlobalEscape and ArgEscape escape states to all nodes
1835 // and check that we still have non-escaping java objects.
1836 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
1837 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
1838 GrowableArray<PointsToNode*> escape_worklist;
1839 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
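// For example (illustrative): if an ArgEscape LocalVar 'v' points to a
// NoEscape JavaObject 'o', 'v' is queued below and the propagation then
// raises 'o' to ArgEscape and revisits everything reachable from 'o'.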
1840 int ptnodes_length = ptnodes_worklist.length(); 1841 for (int next = 0; next < ptnodes_length; ++next) { 1842 PointsToNode* ptn = ptnodes_worklist.at(next); 1843 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1844 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1845 escape_worklist.push(ptn); 1846 } 1847 } 1848 // Set escape states to referenced nodes (edges list). 1849 while (escape_worklist.length() > 0) { 1850 PointsToNode* ptn = escape_worklist.pop(); 1851 PointsToNode::EscapeState es = ptn->escape_state(); 1852 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1853 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1854 es >= PointsToNode::ArgEscape) { 1855 // GlobalEscape or ArgEscape state of field means it has unknown value. 1856 if (add_edge(ptn, phantom_obj)) { 1857 // New edge was added 1858 add_field_uses_to_worklist(ptn->as_Field()); 1859 } 1860 } 1861 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1862 PointsToNode* e = i.get(); 1863 if (e->is_Arraycopy()) { 1864 assert(ptn->arraycopy_dst(), "sanity"); 1865 // Propagate only fields escape state through arraycopy edge. 1866 if (e->fields_escape_state() < field_es) { 1867 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1868 escape_worklist.push(e); 1869 } 1870 } else if (es >= field_es) { 1871 // fields_escape_state is also set to 'es' if it is less than 'es'. 1872 if (e->escape_state() < es) { 1873 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1874 escape_worklist.push(e); 1875 } 1876 } else { 1877 // Propagate field escape state. 1878 bool es_changed = false; 1879 if (e->fields_escape_state() < field_es) { 1880 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1881 es_changed = true; 1882 } 1883 if ((e->escape_state() < field_es) && 1884 e->is_Field() && ptn->is_JavaObject() && 1885 e->as_Field()->is_oop()) { 1886 // Change escape state of referenced fields. 1887 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1888 es_changed = true; 1889 } else if (e->escape_state() < es) { 1890 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 1891 es_changed = true; 1892 } 1893 if (es_changed) { 1894 escape_worklist.push(e); 1895 } 1896 } 1897 } 1898 } 1899 // Remove escaped objects from non_escaped list. 1900 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 1901 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 1902 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 1903 non_escaped_allocs_worklist.delete_at(next); 1904 } 1905 if (ptn->escape_state() == PointsToNode::NoEscape) { 1906 // Find fields in non-escaped allocations which have unknown value. 1907 find_init_values_phantom(ptn); 1908 } 1909 } 1910 return (non_escaped_allocs_worklist.length() > 0); 1911 } 1912 1913 // Add all references to JavaObject node by walking over all uses. 1914 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 1915 int new_edges = 0; 1916 if (populate_worklist) { 1917 // Populate _worklist by uses of jobj's uses. 1918 for (UseIterator i(jobj); i.has_next(); i.next()) { 1919 PointsToNode* use = i.get(); 1920 if (use->is_Arraycopy()) { 1921 continue; 1922 } 1923 add_uses_to_worklist(use); 1924 if (use->is_Field() && use->as_Field()->is_oop()) { 1925 // Put on worklist all field's uses (loads) and 1926 // related field nodes (same base and offset). 
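// For example (illustrative): a store through a.f and a load through b.f
// are distinct Field nodes; if 'a' and 'b' may reference the same object,
// the load's Field node has to be revisited to observe the stored value.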
1927 add_field_uses_to_worklist(use->as_Field());
1928 }
1929 }
1930 }
1931 for (int l = 0; l < _worklist.length(); l++) {
1932 PointsToNode* use = _worklist.at(l);
1933 if (PointsToNode::is_base_use(use)) {
1934 // Add reference from jobj to field and from field to jobj (field's base).
1935 use = PointsToNode::get_use_node(use)->as_Field();
1936 if (add_base(use->as_Field(), jobj)) {
1937 new_edges++;
1938 }
1939 continue;
1940 }
1941 assert(!use->is_JavaObject(), "sanity");
1942 if (use->is_Arraycopy()) {
1943 if (jobj == null_obj) { // null object does not have field edges
1944 continue;
1945 }
1946 // Add edge from Arraycopy node to arraycopy's source java object
1947 if (add_edge(use, jobj)) {
1948 jobj->set_arraycopy_src();
1949 new_edges++;
1950 }
1951 // and stop here.
1952 continue;
1953 }
1954 if (!add_edge(use, jobj)) {
1955 continue; // No new edge added, there was such edge already.
1956 }
1957 new_edges++;
1958 if (use->is_LocalVar()) {
1959 add_uses_to_worklist(use);
1960 if (use->arraycopy_dst()) {
1961 for (EdgeIterator i(use); i.has_next(); i.next()) {
1962 PointsToNode* e = i.get();
1963 if (e->is_Arraycopy()) {
1964 if (jobj == null_obj) { // null object does not have field edges
1965 continue;
1966 }
1967 // Add edge from arraycopy's destination java object to Arraycopy node.
1968 if (add_edge(jobj, e)) {
1969 new_edges++;
1970 jobj->set_arraycopy_dst();
1971 }
1972 }
1973 }
1974 }
1975 } else {
1976 // Added a new edge to the values stored in the field.
1977 // Put on worklist all field's uses (loads) and
1978 // related field nodes (same base and offset).
1979 add_field_uses_to_worklist(use->as_Field());
1980 }
1981 }
1982 _worklist.clear();
1983 _in_worklist.reset();
1984 return new_edges;
1985 }
1986
1987 // Put on worklist all related field nodes.
1988 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
1989 assert(field->is_oop(), "sanity");
1990 int offset = field->offset();
1991 add_uses_to_worklist(field);
1992 // Loop over all bases of this field and push on worklist Field nodes
1993 // with the same offset and base (since they may reference the same field).
1994 for (BaseIterator i(field); i.has_next(); i.next()) {
1995 PointsToNode* base = i.get();
1996 add_fields_to_worklist(field, base);
1997 // Check if the base was the source object of an arraycopy and go over the
1998 // arraycopy's destination objects, since values stored to a field of the
1999 // source object are accessible by uses (loads) of fields of the destination objects.
2000 if (base->arraycopy_src()) {
2001 for (UseIterator j(base); j.has_next(); j.next()) {
2002 PointsToNode* arycp = j.get();
2003 if (arycp->is_Arraycopy()) {
2004 for (UseIterator k(arycp); k.has_next(); k.next()) {
2005 PointsToNode* abase = k.get();
2006 if (abase->arraycopy_dst() && abase != base) {
2007 // Look for the same arraycopy reference.
2008 add_fields_to_worklist(field, abase);
2009 }
2010 }
2011 }
2012 }
2013 }
2014 }
2015 }
2016
2017 // Put on worklist all related field nodes.
2018 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2019 int offset = field->offset();
2020 if (base->is_LocalVar()) {
2021 for (UseIterator j(base); j.has_next(); j.next()) {
2022 PointsToNode* f = j.get();
2023 if (PointsToNode::is_base_use(f)) { // Field
2024 f = PointsToNode::get_use_node(f);
2025 if (f == field || !f->as_Field()->is_oop()) {
2026 continue;
2027 }
2028 int offs = f->as_Field()->offset();
2029 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2030 add_to_worklist(f);
2031 }
2032 }
2033 }
2034 } else {
2035 assert(base->is_JavaObject(), "sanity");
2036 if (// Skip phantom_object since it is only used to indicate that
2037 // this field's content globally escapes.
2038 (base != phantom_obj) &&
2039 // null object node does not have fields.
2040 (base != null_obj)) {
2041 for (EdgeIterator i(base); i.has_next(); i.next()) {
2042 PointsToNode* f = i.get();
2043 // Skip arraycopy edge since store to destination object field
2044 // does not update value in source object field.
2045 if (f->is_Arraycopy()) {
2046 assert(base->arraycopy_dst(), "sanity");
2047 continue;
2048 }
2049 if (f == field || !f->as_Field()->is_oop()) {
2050 continue;
2051 }
2052 int offs = f->as_Field()->offset();
2053 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2054 add_to_worklist(f);
2055 }
2056 }
2057 }
2058 }
2059 }
2060
2061 // Find fields which have unknown value.
2062 int ConnectionGraph::find_field_value(FieldNode* field) {
2063 // Escaped fields should have init value already.
2064 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2065 int new_edges = 0;
2066 for (BaseIterator i(field); i.has_next(); i.next()) {
2067 PointsToNode* base = i.get();
2068 if (base->is_JavaObject()) {
2069 // Skip Allocate's fields which will be processed later.
2070 if (base->ideal_node()->is_Allocate()) {
2071 return 0;
2072 }
2073 assert(base == null_obj, "only null ptr base expected here");
2074 }
2075 }
2076 if (add_edge(field, phantom_obj)) {
2077 // New edge was added
2078 new_edges++;
2079 add_field_uses_to_worklist(field);
2080 }
2081 return new_edges;
2082 }
2083
2084 // Find fields initializing values for allocations.
2085 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2086 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2087 Node* alloc = pta->ideal_node();
2088
2089 // Do nothing for Allocate nodes since their field values are
2090 // "known" unless they are initialized by arraycopy/clone.
2091 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2092 return 0;
2093 }
2094 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2095 #ifdef ASSERT
2096 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2097 const char* name = alloc->as_CallStaticJava()->_name;
2098 assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
2099 }
2100 #endif
2101 // Non-escaped allocations returned from Java or runtime calls have unknown values in fields.
2102 int new_edges = 0;
2103 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2104 PointsToNode* field = i.get();
2105 if (field->is_Field() && field->as_Field()->is_oop()) {
2106 if (add_edge(field, phantom_obj)) {
2107 // New edge was added
2108 new_edges++;
2109 add_field_uses_to_worklist(field->as_Field());
2110 }
2111 }
2112 }
2113 return new_edges;
2114 }
2115
2116 // Find fields initializing values for allocations.
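// For example (an illustrative Java pattern, not taken from this file):
//
//   Point p = new Point();      // p.next is never written
//   if (p.next == q) { ... }    // relies on the recorded null value
//
// The default initialization by null is not captured by the Initialize
// node, so the null_obj edge added below lets pointer compares fold.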
2117 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2118 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2119 Node* alloc = pta->ideal_node();
2120 // Do nothing for Call nodes since their field values are unknown.
2121 if (!alloc->is_Allocate()) {
2122 return 0;
2123 }
2124 InitializeNode* ini = alloc->as_Allocate()->initialization();
2125 bool visited_bottom_offset = false;
2126 GrowableArray<int> offsets_worklist;
2127 int new_edges = 0;
2128
2129 // Check if an oop field's initializing value is recorded and add
2130 // a corresponding null value if it is not recorded.
2131 // Connection Graph does not record a default initialization by null
2132 // captured by Initialize node.
2133 //
2134 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2135 PointsToNode* field = i.get(); // Field (AddP)
2136 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2137 continue; // Not oop field
2138 }
2139 int offset = field->as_Field()->offset();
2140 if (offset == Type::OffsetBot) {
2141 if (!visited_bottom_offset) {
2142 // OffsetBot is used to reference array's element,
2143 // always add reference to null to all Field nodes since we don't
2144 // know which element is referenced.
2145 if (add_edge(field, null_obj)) {
2146 // New edge was added
2147 new_edges++;
2148 add_field_uses_to_worklist(field->as_Field());
2149 visited_bottom_offset = true;
2150 }
2151 }
2152 } else {
2153 // Check only oop fields.
2154 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2155 if (adr_type->isa_rawptr()) {
2156 #ifdef ASSERT
2157 // Raw pointers are used for initializing stores so skip it
2158 // since it should be recorded already
2159 Node* base = get_addp_base(field->ideal_node());
2160 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2161 #endif
2162 continue;
2163 }
2164 if (!offsets_worklist.contains(offset)) {
2165 offsets_worklist.append(offset);
2166 Node* value = nullptr;
2167 if (ini != nullptr) {
2168 // StoreP::memory_type() == T_ADDRESS
2169 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2170 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2171 // Make sure initializing store has the same type as this AddP.
2172 // This AddP may reference a non-existing field because it is on a
2173 // dead branch of a bimorphic call which is not eliminated yet.
2174 if (store != nullptr && store->is_Store() &&
2175 store->as_Store()->memory_type() == ft) {
2176 value = store->in(MemNode::ValueIn);
2177 #ifdef ASSERT
2178 if (VerifyConnectionGraph) {
2179 // Verify that AddP already points to all objects the value points to.
2180 PointsToNode* val = ptnode_adr(value->_idx);
2181 assert((val != nullptr), "should be processed already");
2182 PointsToNode* missed_obj = nullptr;
2183 if (val->is_JavaObject()) {
2184 if (!field->points_to(val->as_JavaObject())) {
2185 missed_obj = val;
2186 }
2187 } else {
2188 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2189 tty->print_cr("----------init store has invalid value -----");
2190 store->dump();
2191 val->dump();
2192 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2193 }
2194 for (EdgeIterator j(val); j.has_next(); j.next()) {
2195 PointsToNode* obj = j.get();
2196 if (obj->is_JavaObject()) {
2197 if (!field->points_to(obj->as_JavaObject())) {
2198 missed_obj = obj;
2199 break;
2200 }
2201 }
2202 }
2203 }
2204 if (missed_obj != nullptr) {
2205 tty->print_cr("----------field---------------------------------");
2206 field->dump();
2207 tty->print_cr("----------missed reference to object-----------");
2208 missed_obj->dump();
2209 tty->print_cr("----------object referenced by init store -----");
2210 store->dump();
2211 val->dump();
2212 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2213 }
2214 }
2215 #endif
2216 } else {
2217 // There could be initializing stores which follow allocation.
2218 // For example, a volatile field store is not collected
2219 // by Initialize node.
2220 //
2221 // Need to check for dependent loads to separate such stores from
2222 // stores which follow loads. For now, add initial value null so
2223 // that compare pointers optimization works correctly.
2224 }
2225 }
2226 if (value == nullptr) {
2227 // A field's initializing value was not recorded. Add null.
2228 if (add_edge(field, null_obj)) {
2229 // New edge was added
2230 new_edges++;
2231 add_field_uses_to_worklist(field->as_Field());
2232 }
2233 }
2234 }
2235 }
2236 }
2237 return new_edges;
2238 }
2239
2240 // Adjust scalar_replaceable state after Connection Graph is built.
2241 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2242 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2243 // returns true. If one of the constraints in this method sets 'jobj' to NSR
2244 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2245 // input, 'adjust_scalar_replaceable_state' will eventually be called with
2246 // that other object and the Phi will become a reducible Phi.
2247 // There could be multiple merges involving the same jobj.
2248 Unique_Node_List candidates;
2249
2250 // Search for non-escaping objects which are not scalar replaceable
2251 // and mark them to propagate the state to referenced objects.
2252
2253 for (UseIterator i(jobj); i.has_next(); i.next()) {
2254 PointsToNode* use = i.get();
2255 if (use->is_Arraycopy()) {
2256 continue;
2257 }
2258 if (use->is_Field()) {
2259 FieldNode* field = use->as_Field();
2260 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2261 // 1. An object is not scalar replaceable if the field into which it is
2262 // stored has unknown offset (stored into unknown element of an array).
2263 if (field->offset() == Type::OffsetBot) {
2264 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2265 return;
2266 }
2267 for (BaseIterator i(field); i.has_next(); i.next()) {
2268 PointsToNode* base = i.get();
2269 // 2.
An object is not scalar replaceable if the field into which it is
2270 // stored has multiple bases one of which is null.
2271 if ((base == null_obj) && (field->base_count() > 1)) {
2272 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2273 return;
2274 }
2275 // 2.5. An object is not scalar replaceable if the field into which it is
2276 // stored has NSR base.
2277 if (!base->scalar_replaceable()) {
2278 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2279 return;
2280 }
2281 }
2282 }
2283 assert(use->is_Field() || use->is_LocalVar(), "sanity");
2284 // 3. An object is not scalar replaceable if it is merged with other objects
2285 // and we can't remove the merge
2286 for (EdgeIterator j(use); j.has_next(); j.next()) {
2287 PointsToNode* ptn = j.get();
2288 if (ptn->is_JavaObject() && ptn != jobj) {
2289 Node* use_n = use->ideal_node();
2290
2291 // If it's already a candidate or confirmed reducible merge we can skip verification
2292 if (candidates.member(use_n)) {
2293 continue;
2294 } else if (reducible_merges.member(use_n)) {
2295 candidates.push(use_n);
2296 continue;
2297 }
2298
2299 if (ReduceAllocationMerges && use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2300 candidates.push(use_n);
2301 } else {
2302 // Mark all objects as NSR if we can't remove the merge
2303 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2304 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2305 }
2306 }
2307 }
2308 if (!jobj->scalar_replaceable()) {
2309 return;
2310 }
2311 }
2312
2313 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2314 if (j.get()->is_Arraycopy()) {
2315 continue;
2316 }
2317
2318 // Non-escaping object node should point only to field nodes.
2319 FieldNode* field = j.get()->as_Field();
2320 int offset = field->as_Field()->offset();
2321
2322 // 4. An object is not scalar replaceable if it has a field with unknown
2323 // offset (array's element is accessed in loop).
2324 if (offset == Type::OffsetBot) {
2325 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2326 return;
2327 }
2328 // 5. Currently an object is not scalar replaceable if a LoadStore node
2329 // accesses its field since the field value is unknown after it.
2330 //
2331 Node* n = field->ideal_node();
2332
2333 // Test for an unsafe access that was parsed as maybe off heap
2334 // (with a CheckCastPP to raw memory).
2335 assert(n->is_AddP(), "expect an address computation");
2336 if (n->in(AddPNode::Base)->is_top() &&
2337 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2338 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2339 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2340 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2341 return;
2342 }
2343
2344 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2345 Node* u = n->fast_out(i);
2346 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2347 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2348 return;
2349 }
2350 }
2351
2352 // 6. Or the address may point to more than one object.
This may produce
2353 // a false positive result (set not scalar replaceable)
2354 // since the flow-insensitive escape analysis can't separate
2355 // the case when stores overwrite the field's value from the case
2356 // when stores happened on different control branches.
2357 //
2358 // Note: it will disable scalar replacement in some cases:
2359 //
2360 // Point p[] = new Point[1];
2361 // p[0] = new Point(); // Will not be scalar replaced
2362 //
2363 // but it will save us from incorrect optimizations in the next cases:
2364 //
2365 // Point p[] = new Point[1];
2366 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
2367 //
2368 if (field->base_count() > 1 && candidates.size() == 0) {
2369 for (BaseIterator i(field); i.has_next(); i.next()) {
2370 PointsToNode* base = i.get();
2371 // Don't take into account LocalVar nodes which
2372 // may point to only one object, which by now should
2373 // also be this field's base.
2374 if (base->is_JavaObject() && base != jobj) {
2375 // Mark all bases.
2376 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
2377 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
2378 }
2379 }
2380
2381 if (!jobj->scalar_replaceable()) {
2382 return;
2383 }
2384 }
2385 }
2386
2387 // The candidate is truly a reducible merge only if none of the other
2388 // constraints ruled it as NSR. There could be multiple merges involving the
2389 // same jobj.
2390 assert(jobj->scalar_replaceable(), "sanity");
2391 for (uint i = 0; i < candidates.size(); i++ ) {
2392 Node* candidate = candidates.at(i);
2393 reducible_merges.push(candidate);
2394 }
2395 }
2396
2397 // Propagate NSR (Not scalar replaceable) state.
2398 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
2399 int jobj_length = jobj_worklist.length();
2400 bool found_nsr_alloc = true;
2401 while (found_nsr_alloc) {
2402 found_nsr_alloc = false;
2403 for (int next = 0; next < jobj_length; ++next) {
2404 JavaObjectNode* jobj = jobj_worklist.at(next);
2405 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
2406 PointsToNode* use = i.get();
2407 if (use->is_Field()) {
2408 FieldNode* field = use->as_Field();
2409 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2410 assert(field->offset() != Type::OffsetBot, "sanity");
2411 for (BaseIterator i(field); i.has_next(); i.next()) {
2412 PointsToNode* base = i.get();
2413 // An object is not scalar replaceable if the field into which
2414 // it is stored has NSR base.
2415 if ((base != null_obj) && !base->scalar_replaceable()) {
2416 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2417 found_nsr_alloc = true;
2418 break;
2419 }
2420 }
2421 }
2422 }
2423 }
2424 }
2425 }
2426
2427 #ifdef ASSERT
2428 void ConnectionGraph::verify_connection_graph(
2429 GrowableArray<PointsToNode*>& ptnodes_worklist,
2430 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2431 GrowableArray<JavaObjectNode*>& java_objects_worklist,
2432 GrowableArray<Node*>& addp_worklist) {
2433 // Verify that graph is complete - no new edges could be added.
2434 int java_objects_length = java_objects_worklist.length(); 2435 int non_escaped_length = non_escaped_allocs_worklist.length(); 2436 int new_edges = 0; 2437 for (int next = 0; next < java_objects_length; ++next) { 2438 JavaObjectNode* ptn = java_objects_worklist.at(next); 2439 new_edges += add_java_object_edges(ptn, true); 2440 } 2441 assert(new_edges == 0, "graph was not complete"); 2442 // Verify that escape state is final. 2443 int length = non_escaped_allocs_worklist.length(); 2444 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 2445 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 2446 (non_escaped_length == length) && 2447 (_worklist.length() == 0), "escape state was not final"); 2448 2449 // Verify fields information. 2450 int addp_length = addp_worklist.length(); 2451 for (int next = 0; next < addp_length; ++next ) { 2452 Node* n = addp_worklist.at(next); 2453 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 2454 if (field->is_oop()) { 2455 // Verify that field has all bases 2456 Node* base = get_addp_base(n); 2457 PointsToNode* ptn = ptnode_adr(base->_idx); 2458 if (ptn->is_JavaObject()) { 2459 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 2460 } else { 2461 assert(ptn->is_LocalVar(), "sanity"); 2462 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2463 PointsToNode* e = i.get(); 2464 if (e->is_JavaObject()) { 2465 assert(field->has_base(e->as_JavaObject()), "sanity"); 2466 } 2467 } 2468 } 2469 // Verify that all fields have initializing values. 2470 if (field->edge_count() == 0) { 2471 tty->print_cr("----------field does not have references----------"); 2472 field->dump(); 2473 for (BaseIterator i(field); i.has_next(); i.next()) { 2474 PointsToNode* base = i.get(); 2475 tty->print_cr("----------field has next base---------------------"); 2476 base->dump(); 2477 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 2478 tty->print_cr("----------base has fields-------------------------"); 2479 for (EdgeIterator j(base); j.has_next(); j.next()) { 2480 j.get()->dump(); 2481 } 2482 tty->print_cr("----------base has references---------------------"); 2483 for (UseIterator j(base); j.has_next(); j.next()) { 2484 j.get()->dump(); 2485 } 2486 } 2487 } 2488 for (UseIterator i(field); i.has_next(); i.next()) { 2489 i.get()->dump(); 2490 } 2491 assert(field->edge_count() > 0, "sanity"); 2492 } 2493 } 2494 } 2495 } 2496 #endif 2497 2498 // Optimize ideal graph. 2499 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 2500 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 2501 Compile* C = _compile; 2502 PhaseIterGVN* igvn = _igvn; 2503 if (EliminateLocks) { 2504 // Mark locks before changing ideal graph. 2505 int cnt = C->macro_count(); 2506 for (int i = 0; i < cnt; i++) { 2507 Node *n = C->macro_node(i); 2508 if (n->is_AbstractLock()) { // Lock and Unlock nodes 2509 AbstractLockNode* alock = n->as_AbstractLock(); 2510 if (!alock->is_non_esc_obj()) { 2511 if (not_global_escape(alock->obj_node())) { 2512 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 2513 // The lock could be marked eliminated by lock coarsening 2514 // code during first IGVN before EA. Replace coarsened flag 2515 // to eliminate all associated locks/unlocks. 
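// For example (an illustrative Java pattern, not taken from this file):
//
//   Object lock = new Object();   // never escapes the thread
//   synchronized (lock) { ... }   // Lock/Unlock pair can be eliminated
//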
2516 #ifdef ASSERT 2517 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 2518 #endif 2519 alock->set_non_esc_obj(); 2520 } 2521 } 2522 } 2523 } 2524 } 2525 2526 if (OptimizePtrCompare) { 2527 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 2528 Node *n = ptr_cmp_worklist.at(i); 2529 const TypeInt* tcmp = optimize_ptr_compare(n); 2530 if (tcmp->singleton()) { 2531 Node* cmp = igvn->makecon(tcmp); 2532 #ifndef PRODUCT 2533 if (PrintOptimizePtrCompare) { 2534 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 2535 if (Verbose) { 2536 n->dump(1); 2537 } 2538 } 2539 #endif 2540 igvn->replace_node(n, cmp); 2541 } 2542 } 2543 } 2544 2545 // For MemBarStoreStore nodes added in library_call.cpp, check 2546 // escape status of associated AllocateNode and optimize out 2547 // MemBarStoreStore node if the allocated object never escapes. 2548 for (int i = 0; i < storestore_worklist.length(); i++) { 2549 Node* storestore = storestore_worklist.at(i); 2550 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 2551 if (alloc->is_Allocate() && not_global_escape(alloc)) { 2552 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 2553 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 2554 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 2555 igvn->register_new_node_with_optimizer(mb); 2556 igvn->replace_node(storestore, mb); 2557 } 2558 } 2559 } 2560 2561 // Optimize objects compare. 2562 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { 2563 assert(OptimizePtrCompare, "sanity"); 2564 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 2565 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO 2566 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE 2567 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1] 2568 2569 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2570 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2571 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2572 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2573 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2574 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2575 2576 // Check simple cases first. 2577 if (jobj1 != nullptr) { 2578 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2579 if (jobj1 == jobj2) { 2580 // Comparing the same not escaping object. 2581 return EQ; 2582 } 2583 Node* obj = jobj1->ideal_node(); 2584 // Comparing not escaping allocation. 2585 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2586 !ptn2->points_to(jobj1)) { 2587 return NE; // This includes nullness check. 2588 } 2589 } 2590 } 2591 if (jobj2 != nullptr) { 2592 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2593 Node* obj = jobj2->ideal_node(); 2594 // Comparing not escaping allocation. 2595 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2596 !ptn1->points_to(jobj2)) { 2597 return NE; // This includes nullness check. 2598 } 2599 } 2600 } 2601 if (jobj1 != nullptr && jobj1 != phantom_obj && 2602 jobj2 != nullptr && jobj2 != phantom_obj && 2603 jobj1->ideal_node()->is_Con() && 2604 jobj2->ideal_node()->is_Con()) { 2605 // Klass or String constants compare. Need to be careful with 2606 // compressed pointers - compare types of ConN and ConP instead of nodes. 
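// For example (illustrative): with compressed oops the two inputs may be a
// ConN and a ConP for the same constant; the nodes differ even though the
// constants match, so make_ptr() of the types is compared below instead.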
2607 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2608 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2609 if (t1->make_ptr() == t2->make_ptr()) { 2610 return EQ; 2611 } else { 2612 return NE; 2613 } 2614 } 2615 if (ptn1->meet(ptn2)) { 2616 return UNKNOWN; // Sets are not disjoint 2617 } 2618 2619 // Sets are disjoint. 2620 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2621 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2622 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2623 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2624 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2625 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2626 // Check nullness of unknown object. 2627 return UNKNOWN; 2628 } 2629 2630 // Disjointness by itself is not sufficient since 2631 // alias analysis is not complete for escaped objects. 2632 // Disjoint sets are definitely unrelated only when 2633 // at least one set has only not escaping allocations. 2634 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2635 if (ptn1->non_escaping_allocation()) { 2636 return NE; 2637 } 2638 } 2639 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2640 if (ptn2->non_escaping_allocation()) { 2641 return NE; 2642 } 2643 } 2644 return UNKNOWN; 2645 } 2646 2647 // Connection Graph construction functions. 2648 2649 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2650 PointsToNode* ptadr = _nodes.at(n->_idx); 2651 if (ptadr != nullptr) { 2652 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2653 return; 2654 } 2655 Compile* C = _compile; 2656 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2657 map_ideal_node(n, ptadr); 2658 } 2659 2660 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2661 PointsToNode* ptadr = _nodes.at(n->_idx); 2662 if (ptadr != nullptr) { 2663 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2664 return ptadr; 2665 } 2666 Compile* C = _compile; 2667 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2668 map_ideal_node(n, ptadr); 2669 return ptadr; 2670 } 2671 2672 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2673 PointsToNode* ptadr = _nodes.at(n->_idx); 2674 if (ptadr != nullptr) { 2675 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2676 return; 2677 } 2678 bool unsafe = false; 2679 bool is_oop = is_oop_field(n, offset, &unsafe); 2680 if (unsafe) { 2681 es = PointsToNode::GlobalEscape; 2682 } 2683 Compile* C = _compile; 2684 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2685 map_ideal_node(n, field); 2686 } 2687 2688 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2689 PointsToNode* src, PointsToNode* dst) { 2690 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2691 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 2692 PointsToNode* ptadr = _nodes.at(n->_idx); 2693 if (ptadr != nullptr) { 2694 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2695 return; 2696 } 2697 Compile* C = _compile; 2698 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2699 map_ideal_node(n, ptadr); 2700 // Add edge from arraycopy node to source object. 2701 (void)add_edge(ptadr, src); 2702 src->set_arraycopy_src(); 2703 // Add edge from destination object to arraycopy node. 
2704 (void)add_edge(dst, ptadr); 2705 dst->set_arraycopy_dst(); 2706 } 2707 2708 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2709 const Type* adr_type = n->as_AddP()->bottom_type(); 2710 BasicType bt = T_INT; 2711 if (offset == Type::OffsetBot) { 2712 // Check only oop fields. 2713 if (!adr_type->isa_aryptr() || 2714 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 2715 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 2716 // OffsetBot is used to reference array's element. Ignore first AddP. 2717 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 2718 bt = T_OBJECT; 2719 } 2720 } 2721 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2722 if (adr_type->isa_instptr()) { 2723 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2724 if (field != nullptr) { 2725 bt = field->layout_type(); 2726 } else { 2727 // Check for unsafe oop field access 2728 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2729 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2730 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2731 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2732 bt = T_OBJECT; 2733 (*unsafe) = true; 2734 } 2735 } 2736 } else if (adr_type->isa_aryptr()) { 2737 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2738 // Ignore array length load. 2739 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 2740 // Ignore first AddP. 2741 } else { 2742 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2743 bt = elemtype->array_element_basic_type(); 2744 } 2745 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2746 // Allocation initialization, ThreadLocal field access, unsafe access 2747 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2748 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2749 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2750 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2751 bt = T_OBJECT; 2752 } 2753 } 2754 } 2755 // Note: T_NARROWOOP is not classed as a real reference type 2756 return (is_reference_type(bt) || bt == T_NARROWOOP); 2757 } 2758 2759 // Returns unique pointed java object or null. 2760 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 2761 // If the node was created after the escape computation we can't answer. 2762 uint idx = n->_idx; 2763 if (idx >= nodes_size()) { 2764 return nullptr; 2765 } 2766 PointsToNode* ptn = ptnode_adr(idx); 2767 if (ptn == nullptr) { 2768 return nullptr; 2769 } 2770 if (ptn->is_JavaObject()) { 2771 return ptn->as_JavaObject(); 2772 } 2773 assert(ptn->is_LocalVar(), "sanity"); 2774 // Check all java objects it points to. 2775 JavaObjectNode* jobj = nullptr; 2776 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2777 PointsToNode* e = i.get(); 2778 if (e->is_JavaObject()) { 2779 if (jobj == nullptr) { 2780 jobj = e->as_JavaObject(); 2781 } else if (jobj != e) { 2782 return nullptr; 2783 } 2784 } 2785 } 2786 return jobj; 2787 } 2788 2789 // Return true if this node points only to non-escaping allocations. 
2790 bool PointsToNode::non_escaping_allocation() {
2791 if (is_JavaObject()) {
2792 Node* n = ideal_node();
2793 if (n->is_Allocate() || n->is_CallStaticJava()) {
2794 return (escape_state() == PointsToNode::NoEscape);
2795 } else {
2796 return false;
2797 }
2798 }
2799 assert(is_LocalVar(), "sanity");
2800 // Check all java objects it points to.
2801 for (EdgeIterator i(this); i.has_next(); i.next()) {
2802 PointsToNode* e = i.get();
2803 if (e->is_JavaObject()) {
2804 Node* n = e->ideal_node();
2805 if ((e->escape_state() != PointsToNode::NoEscape) ||
2806 !(n->is_Allocate() || n->is_CallStaticJava())) {
2807 return false;
2808 }
2809 }
2810 }
2811 return true;
2812 }
2813
2814 // Return true if we know the node does not escape globally.
2815 bool ConnectionGraph::not_global_escape(Node *n) {
2816 assert(!_collecting, "should not call during graph construction");
2817 // If the node was created after the escape computation we can't answer.
2818 uint idx = n->_idx;
2819 if (idx >= nodes_size()) {
2820 return false;
2821 }
2822 PointsToNode* ptn = ptnode_adr(idx);
2823 if (ptn == nullptr) {
2824 return false; // not in congraph (e.g. ConI)
2825 }
2826 PointsToNode::EscapeState es = ptn->escape_state();
2827 // If we have already computed a value, return it.
2828 if (es >= PointsToNode::GlobalEscape) {
2829 return false;
2830 }
2831 if (ptn->is_JavaObject()) {
2832 return true; // (es < PointsToNode::GlobalEscape);
2833 }
2834 assert(ptn->is_LocalVar(), "sanity");
2835 // Check all java objects it points to.
2836 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2837 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2838 return false;
2839 }
2840 }
2841 return true;
2842 }
2843
2844
2845 // Helper functions
2846
2847 // Return true if this node is the specified node or points to it.
2848 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2849 if (is_JavaObject()) {
2850 return (this == ptn);
2851 }
2852 assert(is_LocalVar() || is_Field(), "sanity");
2853 for (EdgeIterator i(this); i.has_next(); i.next()) {
2854 if (i.get() == ptn) {
2855 return true;
2856 }
2857 }
2858 return false;
2859 }
2860
2861 // Return true if the two nodes' points-to sets intersect (one points to another).
2862 bool PointsToNode::meet(PointsToNode* ptn) {
2863 if (this == ptn) {
2864 return true;
2865 } else if (ptn->is_JavaObject()) {
2866 return this->points_to(ptn->as_JavaObject());
2867 } else if (this->is_JavaObject()) {
2868 return ptn->points_to(this->as_JavaObject());
2869 }
2870 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2871 int ptn_count = ptn->edge_count();
2872 for (EdgeIterator i(this); i.has_next(); i.next()) {
2873 PointsToNode* this_e = i.get();
2874 for (int j = 0; j < ptn_count; j++) {
2875 if (this_e == ptn->edge(j)) {
2876 return true;
2877 }
2878 }
2879 }
2880 return false;
2881 }
2882
2883 #ifdef ASSERT
2884 // Return true if bases point to this java object.
2885 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2886 for (BaseIterator i(this); i.has_next(); i.next()) {
2887 if (i.get() == jobj) {
2888 return true;
2889 }
2890 }
2891 return false;
2892 }
2893 #endif
2894
2895 bool ConnectionGraph::is_captured_store_address(Node* addp) {
2896 // Handle simple case first.
2897 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
2898 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
2899 return true;
2900 } else if (addp->in(AddPNode::Address)->is_Phi()) {
2901 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
2902 Node* addp_use = addp->fast_out(i);
2903 if (addp_use->is_Store()) {
2904 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
2905 if (addp_use->fast_out(j)->is_Initialize()) {
2906 return true;
2907 }
2908 }
2909 }
2910 }
2911 }
2912 return false;
2913 }
2914
2915 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
2916 const Type *adr_type = phase->type(adr);
2917 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
2918 // We are computing a raw address for a store captured by an Initialize
2919 // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
2920 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
2921 assert(offs != Type::OffsetBot ||
2922 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
2923 "offset must be a constant or it is initialization of array");
2924 return offs;
2925 }
2926 const TypePtr *t_ptr = adr_type->isa_ptr();
2927 assert(t_ptr != nullptr, "must be a pointer type");
2928 return t_ptr->offset();
2929 }
2930
2931 Node* ConnectionGraph::get_addp_base(Node *addp) {
2932 assert(addp->is_AddP(), "must be AddP");
2933 //
2934 // AddP cases for Base and Address inputs:
2935 // case #1. Direct object's field reference:
2936 // Allocate
2937 // |
2938 // Proj #5 ( oop result )
2939 // |
2940 // CheckCastPP (cast to instance type)
2941 // | |
2942 // AddP ( base == address )
2943 //
2944 // case #2. Indirect object's field reference:
2945 // Phi
2946 // |
2947 // CastPP (cast to instance type)
2948 // | |
2949 // AddP ( base == address )
2950 //
2951 // case #3. Raw object's field reference for Initialize node:
2952 // Allocate
2953 // |
2954 // Proj #5 ( oop result )
2955 // top |
2956 // \ |
2957 // AddP ( base == top )
2958 //
2959 // case #4. Array's element reference:
2960 // {CheckCastPP | CastPP}
2961 // | | |
2962 // | AddP ( array's element offset )
2963 // | |
2964 // AddP ( array's offset )
2965 //
2966 // case #5. Raw object's field reference for arraycopy stub call:
2967 // The inline_native_clone() case when the arraycopy stub is called
2968 // after the allocation before Initialize and CheckCastPP nodes.
2969 // Allocate
2970 // |
2971 // Proj #5 ( oop result )
2972 // | |
2973 // AddP ( base == address )
2974 //
2975 // case #6. Constant Pool, ThreadLocal, CastX2P or
2976 // Raw object's field reference:
2977 // {ConP, ThreadLocal, CastX2P, raw Load}
2978 // top |
2979 // \ |
2980 // AddP ( base == top )
2981 //
2982 // case #7. Klass's field reference.
2983 // LoadKlass
2984 // | |
2985 // AddP ( base == address )
2986 //
2987 // case #8. narrow Klass's field reference.
2988 // LoadNKlass
2989 // |
2990 // DecodeN
2991 // | |
2992 // AddP ( base == address )
2993 //
2994 // case #9. Mixed unsafe access
2995 // {instance}
2996 // |
2997 // CheckCastPP (raw)
2998 // top |
2999 // \ |
3000 // AddP ( base == top )
3001 //
3002 Node *base = addp->in(AddPNode::Base);
3003 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
3004 base = addp->in(AddPNode::Address);
3005 while (base->is_AddP()) {
3006 // Case #6 (unsafe access) may have several chained AddP nodes.
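// For instance (illustrative), an unsafe access whose offset is assembled
// in several steps can appear here as AddP(AddP(base, off1), off2); the
// loop keeps walking the Address input until the non-AddP base is found.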
3007 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3008 base = base->in(AddPNode::Address);
3009 }
3010 if (base->Opcode() == Op_CheckCastPP &&
3011 base->bottom_type()->isa_rawptr() &&
3012 _igvn->type(base->in(1))->isa_oopptr()) {
3013 base = base->in(1); // Case #9
3014 } else {
3015 Node* uncast_base = base->uncast();
3016 int opcode = uncast_base->Opcode();
3017 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3018 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3019 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3020 is_captured_store_address(addp), "sanity");
3021 }
3022 }
3023 return base;
3024 }
3025
3026 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3027 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3028 Node* addp2 = addp->raw_out(0);
3029 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3030 addp2->in(AddPNode::Base) == n &&
3031 addp2->in(AddPNode::Address) == addp) {
3032 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3033 //
3034 // Find array's offset to push it on worklist first and
3035 // as result process an array's element offset first (pushed second)
3036 // to avoid CastPP for the array's offset.
3037 // Otherwise the inserted CastPP (LocalVar) will point to what
3038 // the AddP (Field) points to, which would be wrong since
3039 // the algorithm expects the CastPP to point to the same node
3040 // as AddP's base CheckCastPP (LocalVar).
3041 //
3042 // ArrayAllocation
3043 // |
3044 // CheckCastPP
3045 // |
3046 // memProj (from ArrayAllocation CheckCastPP)
3047 // | ||
3048 // | || Int (element index)
3049 // | || | ConI (log(element size))
3050 // | || | /
3051 // | || LShift
3052 // | || /
3053 // | AddP (array's element offset)
3054 // | |
3055 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
3056 // | / /
3057 // AddP (array's offset)
3058 // |
3059 // Load/Store (memory operation on array's element)
3060 //
3061 return addp2;
3062 }
3063 return nullptr;
3064 }
3065
3066 //
3067 // Adjust the type and inputs of an AddP which computes the
3068 // address of a field of an instance
3069 //
3070 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3071 PhaseGVN* igvn = _igvn;
3072 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3073 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3074 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3075 if (t == nullptr) {
3076 // We are computing a raw address for a store captured by an Initialize
3077 // node; compute an appropriate address type (cases #3 and #5).
3078 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3079 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3080 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3081 assert(offs != Type::OffsetBot, "offset must be a constant");
3082 t = base_t->add_offset(offs)->is_oopptr();
3083 }
3084 int inst_id = base_t->instance_id();
3085 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3086 "old type must be non-instance or match new type");
3087
3088 // The type 't' could be a subclass of 'base_t'.
3089 // As a result t->offset() could be larger than base_t's size and it will
3090 // cause the failure in add_offset() with narrow oops since TypeOopPtr()
3091 // constructor verifies correctness of the offset.
3092 //
3093 // It can happen on a subclass's branch (from type-profile-driven
3094 // inlining) that was not eliminated during parsing, since the exactness
3095 // of the allocation type was not propagated to the subclass type check.
3096 //
3097 // Or the type 't' might not be related to 'base_t' at all.
3098 // That can happen when the CHA type differs from the MDO type on a dead path
3099 // (for example, from an instanceof check) which was not collapsed during parsing.
3100 //
3101 // Do nothing for such an AddP node and don't process its users since
3102 // this code branch will go away.
3103 //
3104 if (!t->is_known_instance() &&
3105 !base_t->maybe_java_subtype_of(t)) {
3106 return false; // bail out
3107 }
3108 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3109 // Do NOT remove the next line: it ensures a new alias index is allocated
3110 // for the instance type. Note: C++ will not remove the call since it
3111 // has a side effect.
3112 int alias_idx = _compile->get_alias_index(tinst);
3113 igvn->set_type(addp, tinst);
3114 // record the allocation in the node map
3115 set_map(addp, get_map(base->_idx));
3116 // Set addp's Base and Address to 'base'.
3117 Node *abase = addp->in(AddPNode::Base);
3118 Node *adr = addp->in(AddPNode::Address);
3119 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3120 adr->in(0)->_idx == (uint)inst_id) {
3121 // Skip AddP cases #3 and #5.
3122 } else {
3123 assert(!abase->is_top(), "sanity"); // AddP case #3
3124 if (abase != base) {
3125 igvn->hash_delete(addp);
3126 addp->set_req(AddPNode::Base, base);
3127 if (abase == adr) {
3128 addp->set_req(AddPNode::Address, base);
3129 } else {
3130 // AddP case #4 (adr is array's element offset AddP node)
3131 #ifdef ASSERT
3132 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3133 assert(adr->is_AddP() && atype != nullptr &&
3134 atype->instance_id() == inst_id, "array's element offset should be processed first");
3135 #endif
3136 }
3137 igvn->hash_insert(addp);
3138 }
3139 }
3140 // Put on the IGVN worklist since at least addp's type was changed above.
3141 record_for_optimizer(addp);
3142 return true;
3143 }
3144
3145 //
3146 // Create a new version of orig_phi if necessary. Returns either the newly
3147 // created phi or an existing phi. Sets 'new_created' to indicate whether a new
3148 // phi was created. Caches the last newly created phi in the node map.
3149 //
3150 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
3151 Compile *C = _compile;
3152 PhaseGVN* igvn = _igvn;
3153 new_created = false;
3154 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3155 // nothing to do if orig_phi is bottom memory or matches alias_idx
3156 if (phi_alias_idx == alias_idx) {
3157 return orig_phi;
3158 }
3159 // Have we recently created a Phi for this alias index?
3160 PhiNode *result = get_map_phi(orig_phi->_idx);
3161 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3162 return result;
3163 }
3164 // The previous check may fail when the same wide memory Phi was split into Phis
3165 // for different memory slices. Search all Phis for this region.
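// (Sketch of that scenario: the node map remembers only the last split, so if
//  a wide Phi was first split for slice A and then for slice B, a lookup for
//  slice A finds the cached slice-B Phi and must fall back to scanning the
//  region's Phi outputs below.)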
3166 if (result != nullptr) {
3167 Node* region = orig_phi->in(0);
3168 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3169 Node* phi = region->fast_out(i);
3170 if (phi->is_Phi() &&
3171 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3172 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3173 return phi->as_Phi();
3174 }
3175 }
3176 }
3177 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3178 if (C->do_escape_analysis() == true && !C->failing()) {
3179 // Retry compilation without escape analysis.
3180 // If this is the first failure, the sentinel string will "stick"
3181 // to the Compile object, and the C2Compiler will see it and retry.
3182 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3183 }
3184 return nullptr;
3185 }
3186 orig_phi_worklist.append_if_missing(orig_phi);
3187 const TypePtr *atype = C->get_adr_type(alias_idx);
3188 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3189 C->copy_node_notes_to(result, orig_phi);
3190 igvn->set_type(result, result->bottom_type());
3191 record_for_optimizer(result);
3192 set_map(orig_phi, result);
3193 new_created = true;
3194 return result;
3195 }
3196
3197 //
3198 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3199 // specified alias index.
3200 //
3201 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
3202 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3203 Compile *C = _compile;
3204 PhaseGVN* igvn = _igvn;
3205 bool new_phi_created;
3206 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3207 if (!new_phi_created) {
3208 return result;
3209 }
3210 GrowableArray<PhiNode *> phi_list;
3211 GrowableArray<uint> cur_input;
3212 PhiNode *phi = orig_phi;
3213 uint idx = 1;
3214 bool finished = false;
3215 while(!finished) {
3216 while (idx < phi->req()) {
3217 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
3218 if (mem != nullptr && mem->is_Phi()) {
3219 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3220 if (new_phi_created) {
3221 // Found a phi for which we created a new split; push the current one
3222 // on the worklist and begin processing the new one.
3223 phi_list.push(phi);
3224 cur_input.push(idx);
3225 phi = mem->as_Phi();
3226 result = newphi;
3227 idx = 1;
3228 continue;
3229 } else {
3230 mem = newphi;
3231 }
3232 }
3233 if (C->failing()) {
3234 return nullptr;
3235 }
3236 result->set_req(idx++, mem);
3237 }
3238 #ifdef ASSERT
3239 // verify that the new Phi has an input for each input of the original
3240 assert( phi->req() == result->req(), "must have same number of inputs.");
3241 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3242 #endif
3243 // Check if all of the new phi's inputs have the specified alias index;
3244 // otherwise use the old phi.
3245 for (uint i = 1; i < phi->req(); i++) {
3246 Node* in = result->in(i);
3247 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
3248 }
3249 // We have finished processing a Phi; see if there are any more to do.
3250 finished = (phi_list.length() == 0);
3251 if (!finished) {
3252 phi = phi_list.pop();
3253 idx = cur_input.pop();
3254 PhiNode *prev_result = get_map_phi(phi->_idx);
3255 prev_result->set_req(idx++, result);
3256 result = prev_result;
3257 }
3258 }
3259 return result;
3260 }
3261
3262 //
3263 // The next methods are derived from methods in MemNode.
3264 //
3265 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
3266 Node *mem = mmem;
3267 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
3268 // means an array I have not precisely typed yet. Do not do any
3269 // alias stuff with it any time soon.
3270 if (toop->base() != Type::AnyPtr &&
3271 !(toop->isa_instptr() &&
3272 toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
3273 toop->offset() == Type::OffsetBot)) {
3274 mem = mmem->memory_at(alias_idx);
3275 // Update the input if this is an improvement over what we have now.
3276 }
3277 return mem;
3278 }
3279
3280 //
3281 // Move memory users to their memory slices.
3282 //
3283 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
3284 Compile* C = _compile;
3285 PhaseGVN* igvn = _igvn;
3286 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
3287 assert(tp != nullptr, "ptr type");
3288 int alias_idx = C->get_alias_index(tp);
3289 int general_idx = C->get_general_index(alias_idx);
3290
3291 // Move users first
3292 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3293 Node* use = n->fast_out(i);
3294 if (use->is_MergeMem()) {
3295 MergeMemNode* mmem = use->as_MergeMem();
3296 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
3297 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
3298 continue; // Nothing to do
3299 }
3300 // Replace previous general reference to mem node.
3301 uint orig_uniq = C->unique();
3302 Node* m = find_inst_mem(n, general_idx, orig_phis);
3303 assert(orig_uniq == C->unique(), "no new nodes");
3304 mmem->set_memory_at(general_idx, m);
3305 --imax;
3306 --i;
3307 } else if (use->is_MemBar()) {
3308 assert(!use->is_Initialize(), "initializing stores should not be moved");
3309 if (use->req() > MemBarNode::Precedent &&
3310 use->in(MemBarNode::Precedent) == n) {
3311 // Don't move related membars.
3312 record_for_optimizer(use);
3313 continue;
3314 }
3315 tp = use->as_MemBar()->adr_type()->isa_ptr();
3316 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
3317 alias_idx == general_idx) {
3318 continue; // Nothing to do
3319 }
3320 // Move to general memory slice.
3321 uint orig_uniq = C->unique();
3322 Node* m = find_inst_mem(n, general_idx, orig_phis);
3323 assert(orig_uniq == C->unique(), "no new nodes");
3324 igvn->hash_delete(use);
3325 imax -= use->replace_edge(n, m, igvn);
3326 igvn->hash_insert(use);
3327 record_for_optimizer(use);
3328 --i;
3329 #ifdef ASSERT
3330 } else if (use->is_Mem()) {
3331 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
3332 // Don't move related cardmark.
3333 continue;
3334 }
3335 // Memory nodes should have a new memory input.
3336 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
3337 assert(tp != nullptr, "ptr type");
3338 int idx = C->get_alias_index(tp);
3339 assert(get_map(use->_idx) != nullptr || idx == alias_idx,
3340 "Following memory nodes should have new memory input or be on the same memory slice");
3341 } else if (use->is_Phi()) {
3342 // Phi nodes should be split and moved already.
3343 tp = use->as_Phi()->adr_type()->isa_ptr();
3344 assert(tp != nullptr, "ptr type");
3345 int idx = C->get_alias_index(tp);
3346 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
3347 } else {
3348 use->dump();
3349 assert(false, "should not be here");
3350 #endif
3351 }
3352 }
3353 }
3354
3355 //
3356 // Search the memory chain of "mem" to find a MemNode whose address
3357 // has the specified alias index.
3358 //
3359 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
3360 if (orig_mem == nullptr) {
3361 return orig_mem;
3362 }
3363 Compile* C = _compile;
3364 PhaseGVN* igvn = _igvn;
3365 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
3366 bool is_instance = (toop != nullptr) && toop->is_known_instance();
3367 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
3368 Node *prev = nullptr;
3369 Node *result = orig_mem;
3370 while (prev != result) {
3371 prev = result;
3372 if (result == start_mem) {
3373 break; // hit one of our sentinels
3374 }
3375 if (result->is_Mem()) {
3376 const Type *at = igvn->type(result->in(MemNode::Address));
3377 if (at == Type::TOP) {
3378 break; // Dead
3379 }
3380 assert (at->isa_ptr() != nullptr, "pointer type required.");
3381 int idx = C->get_alias_index(at->is_ptr());
3382 if (idx == alias_idx) {
3383 break; // Found
3384 }
3385 if (!is_instance && (at->isa_oopptr() == nullptr ||
3386 !at->is_oopptr()->is_known_instance())) {
3387 break; // Do not skip store to general memory slice.
3388 }
3389 result = result->in(MemNode::Memory);
3390 }
3391 if (!is_instance) {
3392 continue; // don't search further for non-instance types
3393 }
3394 // skip over a call which does not affect this memory slice
3395 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
3396 Node *proj_in = result->in(0);
3397 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
3398 break; // hit one of our sentinels
3399 } else if (proj_in->is_Call()) {
3400 // ArrayCopy nodes are processed here as well.
3401 CallNode *call = proj_in->as_Call();
3402 if (!call->may_modify(toop, igvn)) {
3403 result = call->in(TypeFunc::Memory);
3404 }
3405 } else if (proj_in->is_Initialize()) {
3406 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
3407 // Stop if this is the initialization for the object instance which
3408 // contains this memory slice; otherwise skip over it.
3409 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
3410 result = proj_in->in(TypeFunc::Memory);
3411 }
3412 } else if (proj_in->is_MemBar()) {
3413 // Check if there is an array copy for a clone.
3414 // Step over the GC barrier when ReduceInitialCardMarks is disabled.
3415 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3416 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
3417
3418 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
3419 // Stop if it is a clone.
3420 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
3421 if (ac->may_modify(toop, igvn)) {
3422 break;
3423 }
3424 }
3425 result = proj_in->in(TypeFunc::Memory);
3426 }
3427 } else if (result->is_MergeMem()) {
3428 MergeMemNode *mmem = result->as_MergeMem();
3429 result = step_through_mergemem(mmem, alias_idx, toop);
3430 if (result == mmem->base_memory()) {
3431 // Didn't find instance memory; search through the general slice recursively.
3432 result = mmem->memory_at(C->get_general_index(alias_idx));
3433 result = find_inst_mem(result, alias_idx, orig_phis);
3434 if (C->failing()) {
3435 return nullptr;
3436 }
3437 mmem->set_memory_at(alias_idx, result);
3438 }
3439 } else if (result->is_Phi() &&
3440 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
3441 Node *un = result->as_Phi()->unique_input(igvn);
3442 if (un != nullptr) {
3443 orig_phis.append_if_missing(result->as_Phi());
3444 result = un;
3445 } else {
3446 break;
3447 }
3448 } else if (result->is_ClearArray()) {
3449 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
3450 // Cannot bypass the initialization of the instance
3451 // we are looking for.
3452 break;
3453 }
3454 // Otherwise skip it (the call updated the 'result' value).
3455 } else if (result->Opcode() == Op_SCMemProj) {
3456 Node* mem = result->in(0);
3457 Node* adr = nullptr;
3458 if (mem->is_LoadStore()) {
3459 adr = mem->in(MemNode::Address);
3460 } else {
3461 assert(mem->Opcode() == Op_EncodeISOArray ||
3462 mem->Opcode() == Op_StrCompressedCopy, "sanity");
3463 adr = mem->in(3); // Memory edge corresponds to destination array
3464 }
3465 const Type *at = igvn->type(adr);
3466 if (at != Type::TOP) {
3467 assert(at->isa_ptr() != nullptr, "pointer type required.");
3468 int idx = C->get_alias_index(at->is_ptr());
3469 if (idx == alias_idx) {
3470 // Assert in debug mode
3471 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
3472 break; // In product mode return SCMemProj node
3473 }
3474 }
3475 result = mem->in(MemNode::Memory);
3476 } else if (result->Opcode() == Op_StrInflatedCopy) {
3477 Node* adr = result->in(3); // Memory edge corresponds to destination array
3478 const Type *at = igvn->type(adr);
3479 if (at != Type::TOP) {
3480 assert(at->isa_ptr() != nullptr, "pointer type required.");
3481 int idx = C->get_alias_index(at->is_ptr());
3482 if (idx == alias_idx) {
3483 // Assert in debug mode
3484 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
3485 break; // In product mode return SCMemProj node
3486 }
3487 }
3488 result = result->in(MemNode::Memory);
3489 }
3490 }
3491 if (result->is_Phi()) {
3492 PhiNode *mphi = result->as_Phi();
3493 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
3494 const TypePtr *t = mphi->adr_type();
3495 if (!is_instance) {
3496 // Push all non-instance Phis on the orig_phis worklist to update their
3497 // inputs during Phase 4 if needed.
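// (Non-instance memory Phis still merge all slices; once instance slices have
//  been split out of the graph, their per-slice inputs may need recomputing,
//  which is exactly what the Phase 4 loop over orig_phis does.)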
3498 orig_phis.append_if_missing(mphi);
3499 } else if (C->get_alias_index(t) != alias_idx) {
3500 // Create a new Phi with the specified alias index type.
3501 result = split_memory_phi(mphi, alias_idx, orig_phis);
3502 }
3503 }
3504 // the result is either a MemNode, a PhiNode or an InitializeNode.
3505 return result;
3506 }
3507
3508 //
3509 // Convert the types of non-escaped objects to instance types where possible,
3510 // propagate the new type information through the graph, and update memory
3511 // edges and MergeMem inputs to reflect the new type.
3512 //
3513 // We start with allocations (and calls which may be allocations) on alloc_worklist.
3514 // The processing is done in 4 phases:
3515 //
3516 // Phase 1: Process possible allocations from alloc_worklist. Create instance
3517 // types for the CheckCastPP for allocations where possible.
3518 // Propagate the new types through users as follows:
3519 // casts and Phi: push users on alloc_worklist
3520 // AddP: cast Base and Address inputs to the instance type
3521 // push any AddP users on alloc_worklist and push any memnode
3522 // users onto memnode_worklist.
3523 // Phase 2: Process MemNodes from memnode_worklist. Compute the new address type and
3524 // search the Memory chain for a store with the appropriate
3525 // address type. If a Phi is found, create a new version with
3526 // the appropriate memory slices from each of the Phi inputs.
3527 // For stores, process the users as follows:
3528 // MemNode: push on memnode_worklist
3529 // MergeMem: push on mergemem_worklist
3530 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
3531 // moving the first node encountered of each instance type to
3532 // the input corresponding to its alias index, i.e. the
3533 // appropriate memory slice.
3534 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3535 //
3536 // In the following example, the CheckCastPP nodes are the casts of allocation
3537 // results and the allocation of node 29 is non-escaped and eligible to be an
3538 // instance type.
3539 //
3540 // We start with:
3541 //
3542 // 7 Parm #memory
3543 // 10 ConI "12"
3544 // 19 CheckCastPP "Foo"
3545 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3546 // 29 CheckCastPP "Foo"
3547 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
3548 //
3549 // 40 StoreP 25 7 20 ... alias_index=4
3550 // 50 StoreP 35 40 30 ... alias_index=4
3551 // 60 StoreP 45 50 20 ... alias_index=4
3552 // 70 LoadP _ 60 30 ... alias_index=4
3553 // 80 Phi 75 50 60 Memory alias_index=4
3554 // 90 LoadP _ 80 30 ... alias_index=4
3555 // 100 LoadP _ 80 20 ... alias_index=4
3556 //
3557 //
3558 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
3559 // and creating a new alias index for node 30. This gives:
3560 //
3561 // 7 Parm #memory
3562 // 10 ConI "12"
3563 // 19 CheckCastPP "Foo"
3564 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3565 // 29 CheckCastPP "Foo" iid=24
3566 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
3567 //
3568 // 40 StoreP 25 7 20 ... alias_index=4
3569 // 50 StoreP 35 40 30 ... alias_index=6
3570 // 60 StoreP 45 50 20 ... alias_index=4
3571 // 70 LoadP _ 60 30 ... alias_index=6
3572 // 80 Phi 75 50 60 Memory alias_index=4
3573 // 90 LoadP _ 80 30 ... alias_index=6
3574 // 100 LoadP _ 80 20 ... alias_index=4
3575 //
3576 // In phase 2, new memory inputs are computed for the loads and stores,
3577 // and a new version of the phi is created. In phase 4, the inputs to
3578 // node 80 are updated and then the memory nodes are updated with the
3579 // values computed in phase 2. This results in:
3580 //
3581 // 7 Parm #memory
3582 // 10 ConI "12"
3583 // 19 CheckCastPP "Foo"
3584 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
3585 // 29 CheckCastPP "Foo" iid=24
3586 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
3587 //
3588 // 40 StoreP 25 7 20 ... alias_index=4
3589 // 50 StoreP 35 7 30 ... alias_index=6
3590 // 60 StoreP 45 40 20 ... alias_index=4
3591 // 70 LoadP _ 50 30 ... alias_index=6
3592 // 80 Phi 75 40 60 Memory alias_index=4
3593 // 120 Phi 75 50 50 Memory alias_index=6
3594 // 90 LoadP _ 120 30 ... alias_index=6
3595 // 100 LoadP _ 80 20 ... alias_index=4
3596 //
3597 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
3598 GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
3599 GrowableArray<MergeMemNode*> &mergemem_worklist,
3600 Unique_Node_List &reducible_merges) {
3601 GrowableArray<Node *> memnode_worklist;
3602 GrowableArray<PhiNode *> orig_phis;
3603 PhaseIterGVN *igvn = _igvn;
3604 uint new_index_start = (uint) _compile->num_alias_types();
3605 VectorSet visited;
3606 ideal_nodes.clear(); // Reset for use with set_map/get_map.
3607 uint unique_old = _compile->unique();
3608
3609 // Phase 1: Process possible allocations from alloc_worklist.
3610 // Create instance types for the CheckCastPP for allocations where possible.
3611 //
3612 // (Note: don't forget to change the order of the second AddP node on
3613 // the alloc_worklist if the order of the worklist processing is changed;
3614 // see the comment in find_second_addp().)
3615 //
3616 while (alloc_worklist.length() != 0) {
3617 Node *n = alloc_worklist.pop();
3618 uint ni = n->_idx;
3619 if (n->is_Call()) {
3620 CallNode *alloc = n->as_Call();
3621 // copy escape information to call node
3622 PointsToNode* ptn = ptnode_adr(alloc->_idx);
3623 PointsToNode::EscapeState es = ptn->escape_state();
3624 // We have an allocation or call which returns a Java object;
3625 // see if it is non-escaped.
3626 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
3627 continue;
3628 }
3629 // Find the CheckCastPP for the allocate or for the return value of a call
3630 n = alloc->result_cast();
3631 if (n == nullptr) { // No uses except Initialize node
3632 if (alloc->is_Allocate()) {
3633 // Set the scalar_replaceable flag for the allocation
3634 // so it can be eliminated if it has no uses.
3635 alloc->as_Allocate()->_is_scalar_replaceable = true;
3636 }
3637 continue;
3638 }
3639 if (!n->is_CheckCastPP()) { // not a unique CheckCastPP.
3640 // We could reach here for the Allocate case if one Initialize is associated with many Allocates.
3641 if (alloc->is_Allocate()) {
3642 alloc->as_Allocate()->_is_scalar_replaceable = false;
3643 }
3644 continue;
3645 }
3646
3647 // The inline code for Object.clone() casts the allocation result to
3648 // java.lang.Object and then to the actual type of the allocated
3649 // object. Detect this case and use the second cast.
3650 // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
3651 // the allocation result is cast to java.lang.Object and then
3652 // to the actual Array type.
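// (Illustrative Java shape for the clone case; the names are made up:
//      Point q = (Point) p.clone();
//  The inlined allocation is first cast to java.lang.Object and a second
//  CheckCastPP then narrows it to Point; the scan below picks that second cast.)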
3653 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
3654 && (alloc->is_AllocateArray() ||
3655 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
3656 Node *cast2 = nullptr;
3657 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3658 Node *use = n->fast_out(i);
3659 if (use->is_CheckCastPP()) {
3660 cast2 = use;
3661 break;
3662 }
3663 }
3664 if (cast2 != nullptr) {
3665 n = cast2;
3666 } else {
3667 // Not scalar replaceable if the allocation type is statically unknown
3668 // (reflection allocation): the object can't be restored during
3669 // deoptimization without a precise type.
3670 continue;
3671 }
3672 }
3673
3674 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
3675 if (t == nullptr) {
3676 continue; // not a TypeOopPtr
3677 }
3678 if (!t->klass_is_exact()) {
3679 continue; // not a unique type
3680 }
3681 if (alloc->is_Allocate()) {
3682 // Set the scalar_replaceable flag for the allocation
3683 // so it can be eliminated.
3684 alloc->as_Allocate()->_is_scalar_replaceable = true;
3685 }
3686 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
3687 // In order for an object to be scalar-replaceable, it must be:
3688 // - a direct allocation (not a call returning an object)
3689 // - non-escaping
3690 // - eligible to be a unique type
3691 // - not determined to be ineligible by escape analysis
3692 set_map(alloc, n);
3693 set_map(n, alloc);
3694 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
3695 igvn->hash_delete(n);
3696 igvn->set_type(n, tinst);
3697 n->raise_bottom_type(tinst);
3698 igvn->hash_insert(n);
3699 record_for_optimizer(n);
3700 // Allocate an alias index for the header fields. Accesses to
3701 // the header emitted during macro expansion wouldn't have a
3702 // correct memory state otherwise.
3703 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
3704 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
3705 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
3706
3707 // First, put on the worklist all Field edges from the Connection Graph,
3708 // which is more accurate than putting the immediate users from the ideal graph.
3709 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
3710 PointsToNode* tgt = e.get();
3711 if (tgt->is_Arraycopy()) {
3712 continue;
3713 }
3714 Node* use = tgt->ideal_node();
3715 assert(tgt->is_Field() && use->is_AddP(),
3716 "only AddP nodes are Field edges in CG");
3717 if (use->outcnt() > 0) { // Don't process dead nodes
3718 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
3719 if (addp2 != nullptr) {
3720 assert(alloc->is_AllocateArray(),"array allocation was expected");
3721 alloc_worklist.append_if_missing(addp2);
3722 }
3723 alloc_worklist.append_if_missing(use);
3724 }
3725 }
3726
3727 // An allocation may have an Initialize which has raw stores. Scan
3728 // the users of the raw allocation result and push AddP users
3729 // on alloc_worklist.
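// (Typical captured-store shape being scanned here, following case #3 above:
//      Allocate -> Proj #5 (raw result) -> AddP (base == top) -> Store
//  where the Store is captured by the allocation's Initialize node.)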
3730 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
3731 assert (raw_result != nullptr, "must have an allocation result");
3732 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
3733 Node *use = raw_result->fast_out(i);
3734 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
3735 Node* addp2 = find_second_addp(use, raw_result);
3736 if (addp2 != nullptr) {
3737 assert(alloc->is_AllocateArray(),"array allocation was expected");
3738 alloc_worklist.append_if_missing(addp2);
3739 }
3740 alloc_worklist.append_if_missing(use);
3741 } else if (use->is_MemBar()) {
3742 memnode_worklist.append_if_missing(use);
3743 }
3744 }
3745 }
3746 } else if (n->is_AddP()) {
3747 Node* addp_base = get_addp_base(n);
3748 if (addp_base != nullptr && reducible_merges.member(addp_base)) {
3749 // This AddP will go away when we reduce the Phi.
3750 continue;
3751 }
3752 JavaObjectNode* jobj = unique_java_object(addp_base);
3753 if (jobj == nullptr || jobj == phantom_obj) {
3754 #ifdef ASSERT
3755 ptnode_adr(get_addp_base(n)->_idx)->dump();
3756 ptnode_adr(n->_idx)->dump();
3757 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3758 #endif
3759 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3760 return;
3761 }
3762 Node *base = get_map(jobj->idx()); // CheckCastPP node
3763 if (!split_AddP(n, base)) continue; // wrong type from dead path
3764 } else if (n->is_Phi() ||
3765 n->is_CheckCastPP() ||
3766 n->is_EncodeP() ||
3767 n->is_DecodeN() ||
3768 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
3769 if (visited.test_set(n->_idx)) {
3770 assert(n->is_Phi(), "loops only through Phi's");
3771 continue; // already processed
3772 }
3773 // Reducible Phis will be removed from the graph after split_unique_types() finishes.
3774 if (reducible_merges.member(n)) {
3775 // Split loads through phi
3776 reduce_phi_on_field_access(n->as_Phi(), alloc_worklist);
3777 continue;
3778 }
3779 JavaObjectNode* jobj = unique_java_object(n);
3780 if (jobj == nullptr || jobj == phantom_obj) {
3781 #ifdef ASSERT
3782 ptnode_adr(n->_idx)->dump();
3783 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
3784 #endif
3785 _compile->record_failure(_invocation > 0 ?
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 3786 return; 3787 } else { 3788 Node *val = get_map(jobj->idx()); // CheckCastPP node 3789 TypeNode *tn = n->as_Type(); 3790 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 3791 assert(tinst != nullptr && tinst->is_known_instance() && 3792 tinst->instance_id() == jobj->idx() , "instance type expected."); 3793 3794 const Type *tn_type = igvn->type(tn); 3795 const TypeOopPtr *tn_t; 3796 if (tn_type->isa_narrowoop()) { 3797 tn_t = tn_type->make_ptr()->isa_oopptr(); 3798 } else { 3799 tn_t = tn_type->isa_oopptr(); 3800 } 3801 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 3802 if (tn_type->isa_narrowoop()) { 3803 tn_type = tinst->make_narrowoop(); 3804 } else { 3805 tn_type = tinst; 3806 } 3807 igvn->hash_delete(tn); 3808 igvn->set_type(tn, tn_type); 3809 tn->set_type(tn_type); 3810 igvn->hash_insert(tn); 3811 record_for_optimizer(n); 3812 } else { 3813 assert(tn_type == TypePtr::NULL_PTR || 3814 tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t), 3815 "unexpected type"); 3816 continue; // Skip dead path with different type 3817 } 3818 } 3819 } else { 3820 debug_only(n->dump();) 3821 assert(false, "EA: unexpected node"); 3822 continue; 3823 } 3824 // push allocation's users on appropriate worklist 3825 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3826 Node *use = n->fast_out(i); 3827 if(use->is_Mem() && use->in(MemNode::Address) == n) { 3828 // Load/store to instance's field 3829 memnode_worklist.append_if_missing(use); 3830 } else if (use->is_MemBar()) { 3831 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 3832 memnode_worklist.append_if_missing(use); 3833 } 3834 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 3835 Node* addp2 = find_second_addp(use, n); 3836 if (addp2 != nullptr) { 3837 alloc_worklist.append_if_missing(addp2); 3838 } 3839 alloc_worklist.append_if_missing(use); 3840 } else if (use->is_Phi() || 3841 use->is_CheckCastPP() || 3842 use->is_EncodeNarrowPtr() || 3843 use->is_DecodeNarrowPtr() || 3844 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 3845 alloc_worklist.append_if_missing(use); 3846 #ifdef ASSERT 3847 } else if (use->is_Mem()) { 3848 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 3849 } else if (use->is_MergeMem()) { 3850 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 3851 } else if (use->is_SafePoint()) { 3852 // Look for MergeMem nodes for calls which reference unique allocation 3853 // (through CheckCastPP nodes) even for debug info. 
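// (A SafePoint's memory input can be the only remaining reference to the
//  instance slice, e.g. when the object is kept alive solely for debug info
//  and rematerialization during deoptimization, so its MergeMem must be
//  rewired as well.)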
3854 Node* m = use->in(TypeFunc::Memory);
3855 if (m->is_MergeMem()) {
3856 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3857 }
3858 } else if (use->Opcode() == Op_EncodeISOArray) {
3859 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3860 // EncodeISOArray overwrites destination array
3861 memnode_worklist.append_if_missing(use);
3862 }
3863 } else {
3864 uint op = use->Opcode();
3865 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
3866 (use->in(MemNode::Memory) == n)) {
3867 // They overwrite the memory edge corresponding to the destination array.
3868 memnode_worklist.append_if_missing(use);
3869 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
3870 op == Op_CastP2X || op == Op_StoreCM ||
3871 op == Op_FastLock || op == Op_AryEq ||
3872 op == Op_StrComp || op == Op_CountPositives ||
3873 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
3874 op == Op_StrEquals || op == Op_VectorizedHashCode ||
3875 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
3876 op == Op_SubTypeCheck ||
3877 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
3878 n->dump();
3879 use->dump();
3880 assert(false, "EA: missing allocation reference path");
3881 }
3882 #endif
3883 }
3884 }
3885
3886 }
3887
3888 // Go over all ArrayCopy nodes and if one of the inputs has a unique
3889 // type, record it in the ArrayCopy node so we know what memory this
3890 // node uses/modifies.
3891 for (int next = 0; next < arraycopy_worklist.length(); next++) {
3892 ArrayCopyNode* ac = arraycopy_worklist.at(next);
3893 Node* dest = ac->in(ArrayCopyNode::Dest);
3894 if (dest->is_AddP()) {
3895 dest = get_addp_base(dest);
3896 }
3897 JavaObjectNode* jobj = unique_java_object(dest);
3898 if (jobj != nullptr) {
3899 Node *base = get_map(jobj->idx());
3900 if (base != nullptr) {
3901 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3902 ac->_dest_type = base_t;
3903 }
3904 }
3905 Node* src = ac->in(ArrayCopyNode::Src);
3906 if (src->is_AddP()) {
3907 src = get_addp_base(src);
3908 }
3909 jobj = unique_java_object(src);
3910 if (jobj != nullptr) {
3911 Node* base = get_map(jobj->idx());
3912 if (base != nullptr) {
3913 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
3914 ac->_src_type = base_t;
3915 }
3916 }
3917 }
3918
3919 // New alias types were created in split_AddP().
3920 uint new_index_end = (uint) _compile->num_alias_types();
3921
3922 // Phase 2: Process MemNodes from memnode_worklist. Compute the new address type and
3923 // new values for the Memory inputs (the Memory inputs are not
3924 // actually updated until Phase 4).
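// (In the worked example above, Phase 2 determines that store 50's memory
//  input on the new alias_index=6 slice should be node 7, but that edge is
//  only recorded via set_map() here and rewired in Phase 4.)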
3925 if (memnode_worklist.length() == 0)
3926 return; // nothing to do
3927 while (memnode_worklist.length() != 0) {
3928 Node *n = memnode_worklist.pop();
3929 if (visited.test_set(n->_idx)) {
3930 continue;
3931 }
3932 if (n->is_Phi() || n->is_ClearArray()) {
3933 // we don't need to do anything, but the users must be pushed
3934 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
3935 // we don't need to do anything, but the users must be pushed
3936 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
3937 if (n == nullptr) {
3938 continue;
3939 }
3940 } else if (n->Opcode() == Op_StrCompressedCopy ||
3941 n->Opcode() == Op_EncodeISOArray) {
3942 // get the memory projection
3943 n = n->find_out_with(Op_SCMemProj);
3944 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3945 } else {
3946 assert(n->is_Mem(), "memory node required.");
3947 Node *addr = n->in(MemNode::Address);
3948 const Type *addr_t = igvn->type(addr);
3949 if (addr_t == Type::TOP) {
3950 continue;
3951 }
3952 assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
3953 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
3954 assert ((uint)alias_idx < new_index_end, "wrong alias index");
3955 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
3956 if (_compile->failing()) {
3957 return;
3958 }
3959 if (mem != n->in(MemNode::Memory)) {
3960 // We delay the memory edge update since we need the old one in
3961 // the MergeMem code below when instance memory slices are separated.
3962 set_map(n, mem);
3963 }
3964 if (n->is_Load()) {
3965 continue; // don't push users
3966 } else if (n->is_LoadStore()) {
3967 // get the memory projection
3968 n = n->find_out_with(Op_SCMemProj);
3969 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
3970 }
3971 }
3972 // push users on the appropriate worklist
3973 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3974 Node *use = n->fast_out(i);
3975 if (use->is_Phi() || use->is_ClearArray()) {
3976 memnode_worklist.append_if_missing(use);
3977 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
3978 if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
3979 continue;
3980 }
3981 memnode_worklist.append_if_missing(use);
3982 } else if (use->is_MemBar()) {
3983 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
3984 memnode_worklist.append_if_missing(use);
3985 }
3986 #ifdef ASSERT
3987 } else if(use->is_Mem()) {
3988 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
3989 } else if (use->is_MergeMem()) {
3990 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
3991 } else if (use->Opcode() == Op_EncodeISOArray) {
3992 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
3993 // EncodeISOArray overwrites destination array
3994 memnode_worklist.append_if_missing(use);
3995 }
3996 } else {
3997 uint op = use->Opcode();
3998 if ((use->in(MemNode::Memory) == n) &&
3999 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4000 // They overwrite the memory edge corresponding to the destination array.
4001 memnode_worklist.append_if_missing(use);
4002 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4003 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4004 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4005 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4006 n->dump();
4007 use->dump();
4008 assert(false, "EA: missing memory path");
4009 }
4010 #endif
4011 }
4012 }
4013 }
4014
4015 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4016 // Walk each memory slice moving the first node encountered of each
4017 // instance type to the input corresponding to its alias index.
4018 uint length = mergemem_worklist.length();
4019 for( uint next = 0; next < length; ++next ) {
4020 MergeMemNode* nmm = mergemem_worklist.at(next);
4021 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4022 // Note: we don't want to use MergeMemStream here because we only want to
4023 // scan inputs which exist at the start, not ones we add during processing.
4024 // Note 2: MergeMem may already contain instance memory slices added
4025 // during the find_inst_mem() call when memory nodes were processed above.
4026 igvn->hash_delete(nmm);
4027 uint nslices = MIN2(nmm->req(), new_index_start);
4028 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4029 Node* mem = nmm->in(i);
4030 Node* cur = nullptr;
4031 if (mem == nullptr || mem->is_top()) {
4032 continue;
4033 }
4034 // First, update the mergemem by moving memory nodes to their corresponding slices
4035 // if their type became more precise since this mergemem was created.
4036 while (mem->is_Mem()) {
4037 const Type *at = igvn->type(mem->in(MemNode::Address));
4038 if (at != Type::TOP) {
4039 assert (at->isa_ptr() != nullptr, "pointer type required.");
4040 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4041 if (idx == i) {
4042 if (cur == nullptr) {
4043 cur = mem;
4044 }
4045 } else {
4046 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4047 nmm->set_memory_at(idx, mem);
4048 }
4049 }
4050 }
4051 mem = mem->in(MemNode::Memory);
4052 }
4053 nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4054 // Find any instance of the current type if we haven't already
4055 // encountered a memory slice of the instance along the memory chain.
4056 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4057 if((uint)_compile->get_general_index(ni) == i) {
4058 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4059 if (nmm->is_empty_memory(m)) {
4060 Node* result = find_inst_mem(mem, ni, orig_phis);
4061 if (_compile->failing()) {
4062 return;
4063 }
4064 nmm->set_memory_at(ni, result);
4065 }
4066 }
4067 }
4068 }
4069 // Find the rest of the instance values.
4070 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4071 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4072 Node* result = step_through_mergemem(nmm, ni, tinst);
4073 if (result == nmm->base_memory()) {
4074 // Didn't find instance memory; search through the general slice recursively.
4075 result = nmm->memory_at(_compile->get_general_index(ni));
4076 result = find_inst_mem(result, ni, orig_phis);
4077 if (_compile->failing()) {
4078 return;
4079 }
4080 nmm->set_memory_at(ni, result);
4081 }
4082 }
4083 igvn->hash_insert(nmm);
4084 record_for_optimizer(nmm);
4085 }
4086
4087 // Phase 4: Update the inputs of non-instance memory Phis and
4088 // the Memory input of memnodes.
4089 // First update the inputs of any non-instance Phi's from
4090 // which we split out an instance Phi. Note we don't have
4091 // to recursively process Phi's encountered on the input memory
4092 // chains as is done in split_memory_phi() since they will
4093 // also be processed here.
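// (In the worked example above, this is where node 80's inputs are re-pointed
//  at the alias_index=4 memory state and the newly created Phi 120 takes over
//  the alias_index=6 slice feeding load 90.)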
4094 for (int j = 0; j < orig_phis.length(); j++) { 4095 PhiNode *phi = orig_phis.at(j); 4096 int alias_idx = _compile->get_alias_index(phi->adr_type()); 4097 igvn->hash_delete(phi); 4098 for (uint i = 1; i < phi->req(); i++) { 4099 Node *mem = phi->in(i); 4100 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 4101 if (_compile->failing()) { 4102 return; 4103 } 4104 if (mem != new_mem) { 4105 phi->set_req(i, new_mem); 4106 } 4107 } 4108 igvn->hash_insert(phi); 4109 record_for_optimizer(phi); 4110 } 4111 4112 // Update the memory inputs of MemNodes with the value we computed 4113 // in Phase 2 and move stores memory users to corresponding memory slices. 4114 // Disable memory split verification code until the fix for 6984348. 4115 // Currently it produces false negative results since it does not cover all cases. 4116 #if 0 // ifdef ASSERT 4117 visited.Reset(); 4118 Node_Stack old_mems(arena, _compile->unique() >> 2); 4119 #endif 4120 for (uint i = 0; i < ideal_nodes.size(); i++) { 4121 Node* n = ideal_nodes.at(i); 4122 Node* nmem = get_map(n->_idx); 4123 assert(nmem != nullptr, "sanity"); 4124 if (n->is_Mem()) { 4125 #if 0 // ifdef ASSERT 4126 Node* old_mem = n->in(MemNode::Memory); 4127 if (!visited.test_set(old_mem->_idx)) { 4128 old_mems.push(old_mem, old_mem->outcnt()); 4129 } 4130 #endif 4131 assert(n->in(MemNode::Memory) != nmem, "sanity"); 4132 if (!n->is_Load()) { 4133 // Move memory users of a store first. 4134 move_inst_mem(n, orig_phis); 4135 } 4136 // Now update memory input 4137 igvn->hash_delete(n); 4138 n->set_req(MemNode::Memory, nmem); 4139 igvn->hash_insert(n); 4140 record_for_optimizer(n); 4141 } else { 4142 assert(n->is_Allocate() || n->is_CheckCastPP() || 4143 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 4144 } 4145 } 4146 #if 0 // ifdef ASSERT 4147 // Verify that memory was split correctly 4148 while (old_mems.is_nonempty()) { 4149 Node* old_mem = old_mems.node(); 4150 uint old_cnt = old_mems.index(); 4151 old_mems.pop(); 4152 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 4153 } 4154 #endif 4155 } 4156 4157 #ifndef PRODUCT 4158 int ConnectionGraph::_no_escape_counter = 0; 4159 int ConnectionGraph::_arg_escape_counter = 0; 4160 int ConnectionGraph::_global_escape_counter = 0; 4161 4162 static const char *node_type_names[] = { 4163 "UnknownType", 4164 "JavaObject", 4165 "LocalVar", 4166 "Field", 4167 "Arraycopy" 4168 }; 4169 4170 static const char *esc_names[] = { 4171 "UnknownEscape", 4172 "NoEscape", 4173 "ArgEscape", 4174 "GlobalEscape" 4175 }; 4176 4177 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 4178 NodeType nt = node_type(); 4179 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 4180 if (print_state) { 4181 EscapeState es = escape_state(); 4182 EscapeState fields_es = fields_escape_state(); 4183 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 4184 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 4185 out->print("NSR "); 4186 } 4187 } 4188 } 4189 4190 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 4191 dump_header(print_state, out); 4192 if (is_Field()) { 4193 FieldNode* f = (FieldNode*)this; 4194 if (f->is_oop()) { 4195 out->print("oop "); 4196 } 4197 if (f->offset() > 0) { 4198 out->print("+%d ", f->offset()); 4199 } 4200 out->print("("); 4201 for (BaseIterator i(f); i.has_next(); i.next()) { 4202 PointsToNode* b = i.get(); 4203 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? 
"P" : "")); 4204 } 4205 out->print(" )"); 4206 } 4207 out->print("["); 4208 for (EdgeIterator i(this); i.has_next(); i.next()) { 4209 PointsToNode* e = i.get(); 4210 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 4211 } 4212 out->print(" ["); 4213 for (UseIterator i(this); i.has_next(); i.next()) { 4214 PointsToNode* u = i.get(); 4215 bool is_base = false; 4216 if (PointsToNode::is_base_use(u)) { 4217 is_base = true; 4218 u = PointsToNode::get_use_node(u)->as_Field(); 4219 } 4220 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 4221 } 4222 out->print(" ]] "); 4223 if (_node == nullptr) { 4224 out->print("<null>%s", newline ? "\n" : ""); 4225 } else { 4226 _node->dump(newline ? "\n" : "", false, out); 4227 } 4228 } 4229 4230 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 4231 bool first = true; 4232 int ptnodes_length = ptnodes_worklist.length(); 4233 for (int i = 0; i < ptnodes_length; i++) { 4234 PointsToNode *ptn = ptnodes_worklist.at(i); 4235 if (ptn == nullptr || !ptn->is_JavaObject()) { 4236 continue; 4237 } 4238 PointsToNode::EscapeState es = ptn->escape_state(); 4239 if ((es != PointsToNode::NoEscape) && !Verbose) { 4240 continue; 4241 } 4242 Node* n = ptn->ideal_node(); 4243 if (n->is_Allocate() || (n->is_CallStaticJava() && 4244 n->as_CallStaticJava()->is_boxing_method())) { 4245 if (first) { 4246 tty->cr(); 4247 tty->print("======== Connection graph for "); 4248 _compile->method()->print_short_name(); 4249 tty->cr(); 4250 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 4251 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 4252 tty->cr(); 4253 first = false; 4254 } 4255 ptn->dump(); 4256 // Print all locals and fields which reference this allocation 4257 for (UseIterator j(ptn); j.has_next(); j.next()) { 4258 PointsToNode* use = j.get(); 4259 if (use->is_LocalVar()) { 4260 use->dump(Verbose); 4261 } else if (Verbose) { 4262 use->dump(); 4263 } 4264 } 4265 tty->cr(); 4266 } 4267 } 4268 } 4269 4270 void ConnectionGraph::print_statistics() { 4271 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 4272 } 4273 4274 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 4275 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 4276 return; 4277 } 4278 for (int next = 0; next < java_objects_worklist.length(); ++next) { 4279 JavaObjectNode* ptn = java_objects_worklist.at(next); 4280 if (ptn->ideal_node()->is_Allocate()) { 4281 if (ptn->escape_state() == PointsToNode::NoEscape) { 4282 Atomic::inc(&ConnectionGraph::_no_escape_counter); 4283 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 4284 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 4285 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 4286 Atomic::inc(&ConnectionGraph::_global_escape_counter); 4287 } else { 4288 assert(false, "Unexpected Escape State"); 4289 } 4290 } 4291 } 4292 } 4293 4294 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 4295 if (_compile->directive()->TraceEscapeAnalysisOption) { 4296 assert(ptn != nullptr, "should not be null"); 4297 
assert(reason != nullptr, "should not be null"); 4298 ptn->dump_header(true); 4299 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 4300 PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state(); 4301 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 4302 } 4303 } 4304 4305 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 4306 if (_compile->directive()->TraceEscapeAnalysisOption) { 4307 stringStream ss; 4308 ss.print("propagated from: "); 4309 from->dump(true, &ss, false); 4310 return ss.as_string(); 4311 } else { 4312 return nullptr; 4313 } 4314 } 4315 4316 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 4317 if (_compile->directive()->TraceEscapeAnalysisOption) { 4318 stringStream ss; 4319 ss.print("escapes as arg to:"); 4320 call->dump("", false, &ss); 4321 return ss.as_string(); 4322 } else { 4323 return nullptr; 4324 } 4325 } 4326 4327 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 4328 if (_compile->directive()->TraceEscapeAnalysisOption) { 4329 stringStream ss; 4330 ss.print("is merged with other object: "); 4331 other->dump_header(true, &ss); 4332 return ss.as_string(); 4333 } else { 4334 return nullptr; 4335 } 4336 } 4337 4338 #endif 4339 4340 void ConnectionGraph::record_for_optimizer(Node *n) { 4341 _igvn->_worklist.push(n); 4342 _igvn->add_users_to_worklist(n); 4343 }
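// Note: pushing the users of 'n' as well is deliberate. The transformations
// above change types and memory edges in place, so IGVN must also revisit
// consumers whose cached types or hashes may have become stale.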