/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  //    split_unique_types because the methods used to create SafePointScalarObject
  //    need to traverse the memory graph to find values for object fields. We also
  //    set to null the scalarized inputs of reducible Phis so that the Allocates
  //    that they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate at safepoints if they have <= ArgEscape objects in their scope
  // and at java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}
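
// Illustrative sketch (hypothetical, not from the original sources): a Java
// shape that typically produces a reducible allocation merge, assuming 'cond'
// and 'external' are unknown at compile time:
//
//   Point p = cond ? new Point() : external;   // Phi(Region, Allocate, ...)
//   return p.x;                                // Phi -> AddP -> Load
//
// The 'new Point()' input may be scalar replaced while 'external' remains the
// NSR (not scalar replaceable) pointer that is kept alive in debug info via a
// SafePointScalarMerge. The helpers below decide when such a Phi can be
// reduced and then rewrite its users.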

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}
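
// For illustration (hypothetical shape): a test such as "if (p == null)" on
// the merged pointer lowers to
//
//   Phi -> CmpP(Phi, ConP#null) -> Bool -> If
//
// where one input of the CmpP/N is the Phi and the other is a constant, which
// is exactly what the check above accepts.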

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an Opaque4 node between the If and Bool nodes. Bail
          // out in such a case - we need to preserve Opaque4 for the correct
          // processing of predicates after loop opts.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the NULL constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}
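
// For example (informal): if the original CastPP was controlled by
// IfTrue(If(Bool(CmpP(phi, ConP#null)))), the compare built above for a given
// base is CmpP(base, ConP#null); with no control (or a Region control) it
// defaults to a compare against the null constant.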
"Specialize" 673 // means that the CastPP now will be specific for a given base instead of a Phi. 674 // An If-Then-Else-Region block is inserted to control the CastPP. The control 675 // of the CastPP is a copy of the current one (if there is one) or a check 676 // against NULL. 677 // 678 // Before: 679 // 680 // C1 C2 ... Cn 681 // \ | / 682 // \ | / 683 // \ | / 684 // \ | / 685 // \ | / 686 // \ | / 687 // \|/ 688 // Region B1 B2 ... Bn 689 // | \ | / 690 // | \ | / 691 // | \ | / 692 // | \ | / 693 // | \ | / 694 // | \ | / 695 // ---------------> Phi 696 // | 697 // X | 698 // | | 699 // | | 700 // ------> CastPP 701 // 702 // After (only partial illustration; base = B2, current_control = C2): 703 // 704 // C2 705 // | 706 // If 707 // / \ 708 // / \ 709 // T F 710 // /\ / 711 // / \ / 712 // / \ / 713 // C1 CastPP Reg Cn 714 // | | | 715 // | | | 716 // | | | 717 // -------------- | ---------- 718 // | | | 719 // Region 720 // 721 Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) { 722 Node* control_successor = current_control->unique_ctrl_out(); 723 Node* cmp = _igvn->transform(specialize_cmp(base, castpp->in(0))); 724 Node* bol = _igvn->transform(new BoolNode(cmp, BoolTest::ne)); 725 IfNode* if_ne = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If(); 726 Node* not_eq_control = _igvn->transform(new IfTrueNode(if_ne)); 727 Node* yes_eq_control = _igvn->transform(new IfFalseNode(if_ne)); 728 Node* end_region = _igvn->transform(new RegionNode(3)); 729 730 // Insert the new if-else-region block into the graph 731 end_region->set_req(1, not_eq_control); 732 end_region->set_req(2, yes_eq_control); 733 control_successor->replace_edge(current_control, end_region, _igvn); 734 735 _igvn->_worklist.push(current_control); 736 _igvn->_worklist.push(control_successor); 737 738 return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr)); 739 } 740 741 Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) { 742 const Type* load_type = _igvn->type(curr_load); 743 Node* nsr_value = _igvn->zerocon(load_type->basic_type()); 744 Node* memory = curr_load->in(MemNode::Memory); 745 746 // The data_phi merging the loads needs to be nullable if 747 // we are loading pointers. 748 if (load_type->make_ptr() != nullptr) { 749 if (load_type->isa_narrowoop()) { 750 load_type = load_type->meet(TypeNarrowOop::NULL_PTR); 751 } else if (load_type->isa_ptr()) { 752 load_type = load_type->meet(TypePtr::NULL_PTR); 753 } else { 754 assert(false, "Unexpected load ptr type."); 755 } 756 } 757 758 Node* data_phi = PhiNode::make(region, nsr_value, load_type); 759 760 for (int i = 1; i < bases_for_loads->length(); i++) { 761 Node* base = bases_for_loads->at(i); 762 Node* cmp_region = nullptr; 763 if (base != nullptr) { 764 if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node 765 cmp_region = base->unique_ctrl_out_or_null(); 766 assert(cmp_region != nullptr, "There should be."); 767 base = base->find_out_with(Op_CastPP); 768 } 769 770 Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset))); 771 Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? 
memory->in(i) : memory; 772 Node* load = curr_load->clone(); 773 load->set_req(0, nullptr); 774 load->set_req(1, mem); 775 load->set_req(2, addr); 776 777 if (cmp_region != nullptr) { // see comment on previous if 778 Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type); 779 intermediate_phi->set_req(1, _igvn->transform(load)); 780 load = intermediate_phi; 781 } 782 783 data_phi->set_req(i, _igvn->transform(load)); 784 } else { 785 // Just use the default, which is already in phi 786 } 787 } 788 789 // Takes care of updating CG and split_unique_types worklists due 790 // to cloned AddP->Load. 791 updates_after_load_split(data_phi, curr_load, alloc_worklist); 792 793 return _igvn->transform(data_phi); 794 } 795 796 // This method only reduces CastPP fields loads; SafePoints are handled 797 // separately. The idea here is basically to clone the CastPP and place copies 798 // on each input of the Phi, including non-scalar replaceable inputs. 799 // Experimentation shows that the resulting IR graph is simpler that way than if 800 // we just split the cast through scalar-replaceable inputs. 801 // 802 // The reduction process requires that CastPP's control be one of: 803 // 1) no control, 804 // 2) the same region as Ophi, or 805 // 3) an IfTrue/IfFalse coming from an CmpP/N between Ophi and a constant. 806 // 807 // After splitting the CastPP we'll put it under an If-Then-Else-Region control 808 // flow. If the CastPP originally had an IfTrue/False control input then we'll 809 // use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll 810 // juse use a CmpP/N against the NULL constant. 811 // 812 // The If-Then-Else-Region isn't always needed. For instance, if input to 813 // splitted cast was not nullable (or if it was the NULL constant) then we don't 814 // need (shouldn't) use a CastPP at all. 815 // 816 // After the casts are splitted we'll split the AddP->Loads through the Phi and 817 // connect them to the just split CastPPs. 818 // 819 // Before (CastPP control is same as Phi): 820 // 821 // Region Allocate Null Call 822 // | \ | / 823 // | \ | / 824 // | \ | / 825 // | \ | / 826 // | \ | / 827 // | \ | / 828 // ------------------> Phi # Oop Phi 829 // | | 830 // | | 831 // | | 832 // | | 833 // ----------------> CastPP 834 // | 835 // AddP 836 // | 837 // Load 838 // 839 // After (Very much simplified): 840 // 841 // Call NULL 842 // \ / 843 // CmpP 844 // | 845 // Bool#NE 846 // | 847 // If 848 // / \ 849 // T F 850 // / \ / 851 // / R 852 // CastPP | 853 // | | 854 // AddP | 855 // | | 856 // Load | 857 // \ | 0 858 // Allocate \ | / 859 // \ \ | / 860 // AddP Phi 861 // \ / 862 // Load / 863 // \ 0 / 864 // \ | / 865 // \|/ 866 // Phi # "Field" Phi 867 // 868 void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) { 869 Node* ophi = curr_castpp->in(1); 870 assert(ophi->is_Phi(), "Expected this to be a Phi node."); 871 872 // Identify which base should be used for AddP->Load later when spliting the 873 // CastPP->Loads through ophi. Three kind of values may be stored in this 874 // array, depending on the nullability status of the corresponding input in 875 // ophi. 876 // 877 // - nullptr: Meaning that the base is actually the NULL constant and therefore 878 // we won't try to load from it. 879 // 880 // - CFG Node: Meaning that the base is a CastPP that was specialized for 881 // this input of Ophi. 

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                  CmpP/N
//
// After:
//
//        in1  Other    in2  Other    inN  Other
//         |    |        |    |        |    |
//         \    |        |    |        |    |
//          \  /         |   /         |   /
//        CmpP/N       CmpP/N        CmpP/N
//          Bool         Bool          Bool
//            \           |            /
//             \          |           /
//              \         |          /
//               \        |         /
//                \       |        /
//                 \      |       /
//                  \     |      /
//                   \    |     /
//                       Phi
//                        |
//                        |   Zero
//                        |    /
//                        |   /
//                        |  /
//                      CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = _igvn->transform(PhiNode::make(ophi->in(0), zero, TypeInt::INT));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      res_phi_input = _igvn->makecon(tcmp);
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  Node* new_cmp = _igvn->transform(new CmpINode(res_phi, zero));
  _igvn->replace_node(cmp, new_cmp);
}
cmp->in(1) : cmp->in(2); 985 Node* zero = _igvn->intcon(0); 986 BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test; 987 988 // This Phi will merge the result of the Cmps split through the Phi 989 Node* res_phi = _igvn->transform(PhiNode::make(ophi->in(0), zero, TypeInt::INT)); 990 991 for (uint i=1; i<ophi->req(); i++) { 992 Node* ophi_input = ophi->in(i); 993 Node* res_phi_input = nullptr; 994 995 const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other); 996 if (tcmp->singleton()) { 997 res_phi_input = _igvn->makecon(tcmp); 998 } else { 999 Node* ncmp = _igvn->transform(cmp->clone()); 1000 ncmp->set_req(1, ophi_input); 1001 ncmp->set_req(2, other); 1002 Node* bol = _igvn->transform(new BoolNode(ncmp, mask)); 1003 res_phi_input = bol->as_Bool()->as_int_value(_igvn); 1004 } 1005 1006 res_phi->set_req(i, res_phi_input); 1007 } 1008 1009 Node* new_cmp = _igvn->transform(new CmpINode(res_phi, zero)); 1010 _igvn->replace_node(cmp, new_cmp); 1011 } 1012 1013 // Push the newly created AddP on alloc_worklist and patch 1014 // the connection graph. Note that the changes in the CG below 1015 // won't affect the ES of objects since the new nodes have the 1016 // same status as the old ones. 1017 void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) { 1018 assert(data_phi != nullptr, "Output of split_through_phi is null."); 1019 assert(data_phi != previous_load, "Output of split_through_phi is same as input."); 1020 assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi."); 1021 1022 if (data_phi == nullptr || !data_phi->is_Phi()) { 1023 // Make this a retry? 1024 return ; 1025 } 1026 1027 Node* previous_addp = previous_load->in(MemNode::Address); 1028 FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field(); 1029 for (uint i = 1; i < data_phi->req(); i++) { 1030 Node* new_load = data_phi->in(i); 1031 1032 if (new_load->is_Phi()) { 1033 // new_load is currently the "intermediate_phi" from an specialized 1034 // CastPP. 1035 new_load = new_load->in(1); 1036 } 1037 1038 // "new_load" might actually be a constant, parameter, etc. 1039 if (new_load->is_Load()) { 1040 Node* new_addp = new_load->in(MemNode::Address); 1041 Node* base = get_addp_base(new_addp); 1042 1043 // The base might not be something that we can create an unique 1044 // type for. If that's the case we are done with that input. 

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//   -> a '-1' constant, the i'th input of the original Phi is NSR.
//   -> a 'x' constant >= 0, the i'th input of the original Phi will be SR and
//      the info about the scalarized object will be at index x of
//      ObjectMergeValue::possible_objects
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}
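
// Hypothetical example: for ophi = Phi(Region, Alloc#1, call_result, Alloc#2),
// where both Allocates are scalar replaceable and call_result is not, the
// selector built above would be Phi(Region, 0, -1, 1): entries >= 0 index into
// ObjectMergeValue::possible_objects while -1 marks the NSR input.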

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, I collect all safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It then will iterate on the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check detailed description in SafePointScalarMergeNode class header.
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
      if (sobj == nullptr) {
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
    _igvn->_worklist.push(sfpt);
  }

  return true;
}
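
// Sketch (informal) of the debug info shape produced above for one reduced
// merge: the SafePoint gains two trailing inputs, the NSR merge pointer and
// the selector, while the SafePointScalarMerge lists one SafePointScalarObject
// per scalar replaceable input of the Phi:
//
//   sfpt:   [..., nsr_merge_pointer, selector]
//   smerge: [root, sobj#0, sobj#1, ...]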

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  _igvn->hash_delete(ophi);

  // Copying all users first because some will be removed and others won't.
  // Ophi also may acquire some new users as part of Cast reduction.
  // CastPPs also need to be processed before CmpPs.
  Unique_Node_List castpps;
  Unique_Node_List others;
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_CastPP()) {
      castpps.push(use);
    } else if (use->is_AddP() || use->is_Cmp()) {
      others.push(use);
    } else if (use->is_SafePoint()) {
      // processed later
    } else {
      assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
    }
  }

  // CastPPs need to be processed before Cmps because during the process of
  // splitting CastPPs we make reference to the inputs of the Cmp that is used
  // by the If controlling the CastPP.
  for (uint i = 0; i < castpps.size(); i++) {
    reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
  }

  for (uint i = 0; i < others.size(); i++) {
    Node* use = others.at(i);

    if (use->is_AddP()) {
      reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
      reduce_phi_on_cmp(use);
    }
  }

  _igvn->set_delay_transform(delay);
}

void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  for (int i = ophi->outcnt()-1; i >= 0;) {
    Node* out = ophi->raw_out(i);

    if (out->is_ConstraintCast()) {
      const Type* out_t = _igvn->type(out)->make_ptr();
      const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
      bool change = out_new_t != out_t;

      for (int j = out->outcnt()-1; change && j >= 0; --j) {
        Node* out2 = out->raw_out(j);
        if (!out2->is_SafePoint()) {
          change = false;
          break;
        }
      }

      if (change) {
        Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
        _igvn->replace_node(out, new_cast);
        _igvn->register_new_node_with_optimizer(new_cast);
      }
    }

    --i;
    i = MIN2(i, (int)ophi->outcnt()-1);
  }

  _igvn->replace_node(ophi, new_phi);
}
C->record_failure(C2Compiler::retry_no_reduce_allocation_merges()); 1386 } 1387 } 1388 1389 // Validate users of merge 1390 for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) { 1391 Node* sfpt = merge->fast_out(i); 1392 if (sfpt->is_SafePoint()) { 1393 int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms()); 1394 1395 if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) { 1396 assert(false, "SafePointScalarMerge nodes can't be nested."); 1397 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges()); 1398 } 1399 } else { 1400 assert(false, "Only safepoints can use SafePointScalarMerge nodes."); 1401 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges()); 1402 } 1403 } 1404 } 1405 1406 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1407 Node* m = n->fast_out(i); 1408 ideal_nodes.push(m); 1409 } 1410 } 1411 } 1412 1413 // Returns true if there is an object in the scope of sfn that does not escape globally. 1414 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { 1415 Compile* C = _compile; 1416 for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) { 1417 if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() || 1418 DeoptimizeObjectsALot) { 1419 // Jvmti agents can access locals. Must provide info about local objects at runtime. 1420 int num_locs = jvms->loc_size(); 1421 for (int idx = 0; idx < num_locs; idx++) { 1422 Node* l = sfn->local(jvms, idx); 1423 if (not_global_escape(l)) { 1424 return true; 1425 } 1426 } 1427 } 1428 if (C->env()->jvmti_can_get_owned_monitor_info() || 1429 C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) { 1430 // Jvmti agents can read monitors. Must provide info about locked objects at runtime. 1431 int num_mon = jvms->nof_monitors(); 1432 for (int idx = 0; idx < num_mon; idx++) { 1433 Node* m = sfn->monitor_obj(jvms, idx); 1434 if (m != nullptr && not_global_escape(m)) { 1435 return true; 1436 } 1437 } 1438 } 1439 } 1440 return false; 1441 } 1442 1443 // Returns true if at least one of the arguments to the call is an object 1444 // that does not escape globally. 1445 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) { 1446 if (call->method() != nullptr) { 1447 uint max_idx = TypeFunc::Parms + call->method()->arg_size(); 1448 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) { 1449 Node* p = call->in(idx); 1450 if (not_global_escape(p)) { 1451 return true; 1452 } 1453 } 1454 } else { 1455 const char* name = call->as_CallStaticJava()->_name; 1456 assert(name != nullptr, "no name"); 1457 // no arg escapes through uncommon traps 1458 if (strcmp(name, "uncommon_trap") != 0) { 1459 // process_call_arguments() assumes that all arguments escape globally 1460 const TypeTuple* d = call->tf()->domain(); 1461 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1462 const Type* at = d->field_at(i); 1463 if (at->isa_oopptr() != nullptr) { 1464 return true; 1465 } 1466 } 1467 } 1468 } 1469 return false; 1470 } 1471 1472 1473 1474 // Utility function for nodes that load an object 1475 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 1476 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1477 // ThreadLocal has RawPtr type. 


// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
1555 } else {
1556 n_ptn = ptnode_adr(n_idx);
1557 add_base(n_ptn->as_Field(), ptn_base);
1558 }
1559 break;
1560 }
1561 case Op_CastX2P: {
1562 map_ideal_node(n, phantom_obj);
1563 break;
1564 }
1565 case Op_CastPP:
1566 case Op_CheckCastPP:
1567 case Op_EncodeP:
1568 case Op_DecodeN:
1569 case Op_EncodePKlass:
1570 case Op_DecodeNKlass: {
1571 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1572 break;
1573 }
1574 case Op_CMoveP: {
1575 add_local_var(n, PointsToNode::NoEscape);
1576 // Do not add edges during the first iteration because some may
1577 // not be defined yet.
1578 delayed_worklist->push(n);
1579 break;
1580 }
1581 case Op_ConP:
1582 case Op_ConN:
1583 case Op_ConNKlass: {
1584 // assume all oop constants globally escape except for null
1585 PointsToNode::EscapeState es;
1586 const Type* t = igvn->type(n);
1587 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1588 es = PointsToNode::NoEscape;
1589 } else {
1590 es = PointsToNode::GlobalEscape;
1591 }
1592 PointsToNode* ptn_con = add_java_object(n, es);
1593 set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1594 break;
1595 }
1596 case Op_CreateEx: {
1597 // assume that all exception objects globally escape
1598 map_ideal_node(n, phantom_obj);
1599 break;
1600 }
1601 case Op_LoadKlass:
1602 case Op_LoadNKlass: {
1603 // Unknown class is loaded
1604 map_ideal_node(n, phantom_obj);
1605 break;
1606 }
1607 case Op_LoadP:
1608 case Op_LoadN: {
1609 add_objload_to_connection_graph(n, delayed_worklist);
1610 break;
1611 }
1612 case Op_Parm: {
1613 map_ideal_node(n, phantom_obj);
1614 break;
1615 }
1616 case Op_PartialSubtypeCheck: {
1617 // Produces Null or notNull and is used only in CmpP so
1618 // phantom_obj could be used.
1619 map_ideal_node(n, phantom_obj); // Result is unknown
1620 break;
1621 }
1622 case Op_Phi: {
1623 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1624 // ThreadLocal has RawPtr type.
1625 const Type* t = n->as_Phi()->type();
1626 if (t->make_ptr() != nullptr) {
1627 add_local_var(n, PointsToNode::NoEscape);
1628 // Do not add edges during the first iteration because some may
1629 // not be defined yet.
1630 delayed_worklist->push(n);
1631 }
1632 break;
1633 }
1634 case Op_Proj: {
1635 // we are only interested in the oop result projection from a call
1636 if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1637 n->in(0)->as_Call()->returns_pointer()) {
1638 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1639 }
1640 break;
1641 }
1642 case Op_Rethrow: // Exception object escapes
1643 case Op_Return: {
1644 if (n->req() > TypeFunc::Parms &&
1645 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1646 // Treat Return value as LocalVar with GlobalEscape escape state.
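// (Once returned, an oop is visible to the caller, which this
// intraprocedural analysis cannot see. E.g. in a hypothetical
// 'Object id(Object o) { return o; }' the Return input is forced to
// GlobalEscape here.)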
1647 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1648 }
1649 break;
1650 }
1651 case Op_CompareAndExchangeP:
1652 case Op_CompareAndExchangeN:
1653 case Op_GetAndSetP:
1654 case Op_GetAndSetN: {
1655 add_objload_to_connection_graph(n, delayed_worklist);
1656 // fall-through
1657 }
1658 case Op_StoreP:
1659 case Op_StoreN:
1660 case Op_StoreNKlass:
1661 case Op_WeakCompareAndSwapP:
1662 case Op_WeakCompareAndSwapN:
1663 case Op_CompareAndSwapP:
1664 case Op_CompareAndSwapN: {
1665 add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1666 break;
1667 }
1668 case Op_AryEq:
1669 case Op_CountPositives:
1670 case Op_StrComp:
1671 case Op_StrEquals:
1672 case Op_StrIndexOf:
1673 case Op_StrIndexOfChar:
1674 case Op_StrInflatedCopy:
1675 case Op_StrCompressedCopy:
1676 case Op_VectorizedHashCode:
1677 case Op_EncodeISOArray: {
1678 add_local_var(n, PointsToNode::ArgEscape);
1679 delayed_worklist->push(n); // Process it later.
1680 break;
1681 }
1682 case Op_ThreadLocal: {
1683 PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1684 set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
1685 break;
1686 }
1687 case Op_Blackhole: {
1688 // All blackhole pointer arguments are globally escaping.
1689 // Only do this if there is at least one pointer argument.
1690 // Do not add edges during the first iteration because some may
1691 // not be defined yet; defer to the final step.
1692 for (uint i = 0; i < n->req(); i++) {
1693 Node* in = n->in(i);
1694 if (in != nullptr) {
1695 const Type* at = _igvn->type(in);
1696 if (!at->isa_ptr()) continue;
1697
1698 add_local_var(n, PointsToNode::GlobalEscape);
1699 delayed_worklist->push(n);
1700 break;
1701 }
1702 }
1703 break;
1704 }
1705 default:
1706 ; // Do nothing for nodes not related to EA.
1707 }
1708 return;
1709 }
1710
1711 // Add final simple edges to graph.
1712 void ConnectionGraph::add_final_edges(Node *n) {
1713 PointsToNode* n_ptn = ptnode_adr(n->_idx);
1714 #ifdef ASSERT
1715 if (_verify && n_ptn->is_JavaObject())
1716 return; // This method does not change graph for JavaObject.
1717 #endif
1718
1719 if (n->is_Call()) {
1720 process_call_arguments(n->as_Call());
1721 return;
1722 }
1723 assert(n->is_Store() || n->is_LoadStore() ||
1724 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1725 "node should be registered already");
1726 int opcode = n->Opcode();
1727 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1728 if (gc_handled) {
1729 return; // Ignore node if already handled by GC.
1730 }
1731 switch (opcode) {
1732 case Op_AddP: {
1733 Node* base = get_addp_base(n);
1734 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1735 assert(ptn_base != nullptr, "field's base should be registered");
1736 add_base(n_ptn->as_Field(), ptn_base);
1737 break;
1738 }
1739 case Op_CastPP:
1740 case Op_CheckCastPP:
1741 case Op_EncodeP:
1742 case Op_DecodeN:
1743 case Op_EncodePKlass:
1744 case Op_DecodeNKlass: {
1745 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1746 break;
1747 }
1748 case Op_CMoveP: {
1749 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1750 Node* in = n->in(i);
1751 if (in == nullptr) {
1752 continue; // ignore null
1753 }
1754 Node* uncast_in = in->uncast();
1755 if (uncast_in->is_top() || uncast_in == n) {
1756 continue; // ignore top or inputs which go back to this node
1757 }
1758 PointsToNode* ptn = ptnode_adr(in->_idx);
1759 assert(ptn != nullptr, "node should be registered");
1760 add_edge(n_ptn, ptn);
1761 }
1762 break;
1763 }
1764 case Op_LoadP:
1765 case Op_LoadN: {
1766 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1767 // ThreadLocal has RawPtr type.
1768 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1769 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1770 break;
1771 }
1772 case Op_Phi: {
1773 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1774 // ThreadLocal has RawPtr type.
1775 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1776 for (uint i = 1; i < n->req(); i++) {
1777 Node* in = n->in(i);
1778 if (in == nullptr) {
1779 continue; // ignore null
1780 }
1781 Node* uncast_in = in->uncast();
1782 if (uncast_in->is_top() || uncast_in == n) {
1783 continue; // ignore top or inputs which go back to this node
1784 }
1785 PointsToNode* ptn = ptnode_adr(in->_idx);
1786 assert(ptn != nullptr, "node should be registered");
1787 add_edge(n_ptn, ptn);
1788 }
1789 break;
1790 }
1791 case Op_Proj: {
1792 // we are only interested in the oop result projection from a call
1793 assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1794 n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1795 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1796 break;
1797 }
1798 case Op_Rethrow: // Exception object escapes
1799 case Op_Return: {
1800 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1801 "Unexpected node type");
1802 // Treat Return value as LocalVar with GlobalEscape escape state.
1803 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1804 break;
1805 }
1806 case Op_CompareAndExchangeP:
1807 case Op_CompareAndExchangeN:
1808 case Op_GetAndSetP:
1809 case Op_GetAndSetN:{
1810 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1811 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1812 // fall-through
1813 }
1814 case Op_CompareAndSwapP:
1815 case Op_CompareAndSwapN:
1816 case Op_WeakCompareAndSwapP:
1817 case Op_WeakCompareAndSwapN:
1818 case Op_StoreP:
1819 case Op_StoreN:
1820 case Op_StoreNKlass:{
1821 add_final_edges_unsafe_access(n, opcode);
1822 break;
1823 }
1824 case Op_VectorizedHashCode:
1825 case Op_AryEq:
1826 case Op_CountPositives:
1827 case Op_StrComp:
1828 case Op_StrEquals:
1829 case Op_StrIndexOf:
1830 case Op_StrIndexOfChar:
1831 case Op_StrInflatedCopy:
1832 case Op_StrCompressedCopy:
1833 case Op_EncodeISOArray: {
1834 // char[]/byte[] arrays passed to string intrinsics do not escape but
1835 // they are not scalar replaceable. Adjust escape state for them.
1836 // Start from in(2) edge since in(1) is memory edge.
1837 for (uint i = 2; i < n->req(); i++) {
1838 Node* adr = n->in(i);
1839 const Type* at = _igvn->type(adr);
1840 if (!adr->is_top() && at->isa_ptr()) {
1841 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1842 at->isa_ptr() != nullptr, "expecting a pointer");
1843 if (adr->is_AddP()) {
1844 adr = get_addp_base(adr);
1845 }
1846 PointsToNode* ptn = ptnode_adr(adr->_idx);
1847 assert(ptn != nullptr, "node should be registered");
1848 add_edge(n_ptn, ptn);
1849 }
1850 }
1851 break;
1852 }
1853 case Op_Blackhole: {
1854 // All blackhole pointer arguments are globally escaping.
1855 for (uint i = 0; i < n->req(); i++) {
1856 Node* in = n->in(i);
1857 if (in != nullptr) {
1858 const Type* at = _igvn->type(in);
1859 if (!at->isa_ptr()) continue;
1860
1861 if (in->is_AddP()) {
1862 in = get_addp_base(in);
1863 }
1864
1865 PointsToNode* ptn = ptnode_adr(in->_idx);
1866 assert(ptn != nullptr, "should be defined already");
1867 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
1868 add_edge(n_ptn, ptn);
1869 }
1870 }
1871 break;
1872 }
1873 default: {
1874 // This method should be called only for EA specific nodes which may
1875 // miss some edges when they were created.
1876 #ifdef ASSERT
1877 n->dump(1);
1878 #endif
1879 guarantee(false, "unknown node");
1880 }
1881 }
1882 return;
1883 }
1884
1885 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
1886 Node* adr = n->in(MemNode::Address);
1887 const Type* adr_type = _igvn->type(adr);
1888 adr_type = adr_type->make_ptr();
1889 if (adr_type == nullptr) {
1890 return; // skip dead nodes
1891 }
1892 if (adr_type->isa_oopptr()
1893 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1894 && adr_type == TypeRawPtr::NOTNULL
1895 && is_captured_store_address(adr))) {
1896 delayed_worklist->push(n); // Process it later.
1897 #ifdef ASSERT
1898 assert (adr->is_AddP(), "expecting an AddP");
1899 if (adr_type == TypeRawPtr::NOTNULL) {
1900 // Verify a raw address for a store captured by Initialize node.
1901 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1902 assert(offs != Type::OffsetBot, "offset must be a constant");
1903 }
1904 #endif
1905 } else {
1906 // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
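// (During OSR compilation the interpreter's monitor state is migrated
// into the compiled frame; the raw store of the displaced mark word into
// the BoxLock slot is not an oop store, so it carries no escape
// information.)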
1907 if (adr->is_BoxLock()) { 1908 return; 1909 } 1910 // Stored value escapes in unsafe access. 1911 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1912 delayed_worklist->push(n); // Process unsafe access later. 1913 return; 1914 } 1915 #ifdef ASSERT 1916 n->dump(1); 1917 assert(false, "not unsafe"); 1918 #endif 1919 } 1920 } 1921 1922 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { 1923 Node* adr = n->in(MemNode::Address); 1924 const Type *adr_type = _igvn->type(adr); 1925 adr_type = adr_type->make_ptr(); 1926 #ifdef ASSERT 1927 if (adr_type == nullptr) { 1928 n->dump(1); 1929 assert(adr_type != nullptr, "dead node should not be on list"); 1930 return true; 1931 } 1932 #endif 1933 1934 if (adr_type->isa_oopptr() 1935 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 1936 && adr_type == TypeRawPtr::NOTNULL 1937 && is_captured_store_address(adr))) { 1938 // Point Address to Value 1939 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 1940 assert(adr_ptn != nullptr && 1941 adr_ptn->as_Field()->is_oop(), "node should be registered"); 1942 Node* val = n->in(MemNode::ValueIn); 1943 PointsToNode* ptn = ptnode_adr(val->_idx); 1944 assert(ptn != nullptr, "node should be registered"); 1945 add_edge(adr_ptn, ptn); 1946 return true; 1947 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1948 // Stored value escapes in unsafe access. 1949 Node* val = n->in(MemNode::ValueIn); 1950 PointsToNode* ptn = ptnode_adr(val->_idx); 1951 assert(ptn != nullptr, "node should be registered"); 1952 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address")); 1953 // Add edge to object for unsafe access with offset. 1954 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 1955 assert(adr_ptn != nullptr, "node should be registered"); 1956 if (adr_ptn->is_Field()) { 1957 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 1958 add_edge(adr_ptn, ptn); 1959 } 1960 return true; 1961 } 1962 #ifdef ASSERT 1963 n->dump(1); 1964 assert(false, "not unsafe"); 1965 #endif 1966 return false; 1967 } 1968 1969 void ConnectionGraph::add_call_node(CallNode* call) { 1970 assert(call->returns_pointer(), "only for call which returns pointer"); 1971 uint call_idx = call->_idx; 1972 if (call->is_Allocate()) { 1973 Node* k = call->in(AllocateNode::KlassNode); 1974 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 1975 assert(kt != nullptr, "TypeKlassPtr required."); 1976 PointsToNode::EscapeState es = PointsToNode::NoEscape; 1977 bool scalar_replaceable = true; 1978 NOT_PRODUCT(const char* nsr_reason = ""); 1979 if (call->is_AllocateArray()) { 1980 if (!kt->isa_aryklassptr()) { // StressReflectiveCode 1981 es = PointsToNode::GlobalEscape; 1982 } else { 1983 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 1984 if (length < 0) { 1985 // Not scalar replaceable if the length is not constant. 1986 scalar_replaceable = false; 1987 NOT_PRODUCT(nsr_reason = "has a non-constant length"); 1988 } else if (length > EliminateAllocationArraySizeLimit) { 1989 // Not scalar replaceable if the length is too big. 1990 scalar_replaceable = false; 1991 NOT_PRODUCT(nsr_reason = "has a length that is too big"); 1992 } 1993 } 1994 } else { // Allocate instance 1995 if (!kt->isa_instklassptr()) { // StressReflectiveCode 1996 es = PointsToNode::GlobalEscape; 1997 } else { 1998 const TypeInstKlassPtr* ikt = kt->is_instklassptr(); 1999 ciInstanceKlass* ik = ikt->klass_is_exact() ? 
ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2000 if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2001 ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2002 !ik->can_be_instantiated() ||
2003 ik->has_finalizer()) {
2004 es = PointsToNode::GlobalEscape;
2005 } else {
2006 int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2007 if (nfields > EliminateAllocationFieldsLimit) {
2008 // Not scalar replaceable if there are too many fields.
2009 scalar_replaceable = false;
2010 NOT_PRODUCT(nsr_reason = "has too many fields");
2011 }
2012 }
2013 }
2014 }
2015 add_java_object(call, es);
2016 PointsToNode* ptn = ptnode_adr(call_idx);
2017 if (!scalar_replaceable && ptn->scalar_replaceable()) {
2018 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2019 }
2020 } else if (call->is_CallStaticJava()) {
2021 // Call nodes could be different types:
2022 //
2023 // 1. CallDynamicJavaNode (what happened during call is unknown):
2024 //
2025 // - mapped to GlobalEscape JavaObject node if oop is returned;
2026 //
2027 // - all oop arguments are escaping globally;
2028 //
2029 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2030 //
2031 // - the same as CallDynamicJavaNode if can't do bytecode analysis;
2032 //
2033 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2034 // - mapped to NoEscape JavaObject node if non-escaping object allocated
2035 // during call is returned;
2036 // - mapped to ArgEscape LocalVar node pointing to object arguments
2037 // which are returned and do not escape during the call;
2038 //
2039 // - oop arguments' escaping status is defined by bytecode analysis;
2040 //
2041 // For a static call, we know exactly what method is being called.
2042 // Use bytecode estimator to record whether the call's return value escapes.
2043 ciMethod* meth = call->as_CallJava()->method();
2044 if (meth == nullptr) {
2045 const char* name = call->as_CallStaticJava()->_name;
2046 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2047 // Returns a newly allocated non-escaped object.
2048 add_java_object(call, PointsToNode::NoEscape);
2049 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2050 } else if (meth->is_boxing_method()) {
2051 // Returns boxing object
2052 PointsToNode::EscapeState es;
2053 vmIntrinsics::ID intr = meth->intrinsic_id();
2054 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2055 // It does not escape if object is always allocated.
2056 es = PointsToNode::NoEscape;
2057 } else {
2058 // It escapes globally if object could be loaded from cache.
2059 es = PointsToNode::GlobalEscape;
2060 }
2061 add_java_object(call, es);
2062 if (es == PointsToNode::GlobalEscape) {
2063 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2064 }
2065 } else {
2066 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2067 call_analyzer->copy_dependencies(_compile->dependencies());
2068 if (call_analyzer->is_return_allocated()) {
2069 // Returns a newly allocated non-escaped object, simply
2070 // update dependency information.
2071 // Mark it as NoEscape so that objects referenced by
2072 // its fields will be marked as NoEscape at least.
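// (For example, a hypothetical callee 'static A make() { return new A(); }'
// is recognized by the bytecode analyzer as always returning its own fresh
// allocation, so the call itself can be modeled as a NoEscape object.)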
2073 add_java_object(call, PointsToNode::NoEscape);
2074 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2075 } else {
2076 // Determine whether any arguments are returned.
2077 const TypeTuple* d = call->tf()->domain();
2078 bool ret_arg = false;
2079 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2080 if (d->field_at(i)->isa_ptr() != nullptr &&
2081 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2082 ret_arg = true;
2083 break;
2084 }
2085 }
2086 if (ret_arg) {
2087 add_local_var(call, PointsToNode::ArgEscape);
2088 } else {
2089 // Returns unknown object.
2090 map_ideal_node(call, phantom_obj);
2091 }
2092 }
2093 }
2094 } else {
2095 // Another type of call, assume the worst case:
2096 // returned value is unknown and globally escapes.
2097 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2098 map_ideal_node(call, phantom_obj);
2099 }
2100 }
2101
2102 void ConnectionGraph::process_call_arguments(CallNode *call) {
2103 bool is_arraycopy = false;
2104 switch (call->Opcode()) {
2105 #ifdef ASSERT
2106 case Op_Allocate:
2107 case Op_AllocateArray:
2108 case Op_Lock:
2109 case Op_Unlock:
2110 assert(false, "should be done already");
2111 break;
2112 #endif
2113 case Op_ArrayCopy:
2114 case Op_CallLeafNoFP:
2115 // Most array copies are ArrayCopy nodes at this point but there
2116 // are still a few direct calls to the copy subroutines (See
2117 // PhaseStringOpts::copy_string())
2118 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2119 call->as_CallLeaf()->is_call_to_arraycopystub();
2120 // fall through
2121 case Op_CallLeafVector:
2122 case Op_CallLeaf: {
2123 // Stub calls, objects do not escape but they are not scalar replaceable.
2124 // Adjust escape state for outgoing arguments.
2125 const TypeTuple * d = call->tf()->domain();
2126 bool src_has_oops = false;
2127 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2128 const Type* at = d->field_at(i);
2129 Node *arg = call->in(i);
2130 if (arg == nullptr) {
2131 continue;
2132 }
2133 const Type *aat = _igvn->type(arg);
2134 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2135 continue;
2136 }
2137 if (arg->is_AddP()) {
2138 //
2139 // The inline_native_clone() case when the arraycopy stub is called
2140 // after the allocation before Initialize and CheckCastPP nodes.
2141 // Or normal arraycopy for object arrays case.
2142 //
2143 // Set AddP's base (Allocate) as not scalar replaceable since
2144 // pointer to the base (with offset) is passed as argument.
2145 //
2146 arg = get_addp_base(arg);
2147 }
2148 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2149 assert(arg_ptn != nullptr, "should be registered");
2150 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2151 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2152 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2153 aat->isa_ptr() != nullptr, "expecting a Ptr");
2154 bool arg_has_oops = aat->isa_oopptr() &&
2155 (aat->isa_instptr() ||
2156 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2157 if (i == TypeFunc::Parms) {
2158 src_has_oops = arg_has_oops;
2159 }
2160 //
2161 // src or dst could be j.l.Object when other is basic type array:
2162 //
2163 // arraycopy(char[],0,Object*,0,size);
2164 // arraycopy(Object*,0,char[],0,size);
2165 //
2166 // Don't add edges in such cases.
2167 // 2168 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 2169 arg_has_oops && (i > TypeFunc::Parms); 2170 #ifdef ASSERT 2171 if (!(is_arraycopy || 2172 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 2173 (call->as_CallLeaf()->_name != nullptr && 2174 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 2175 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 2176 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 2177 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 2178 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 2179 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 2180 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 2181 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 2182 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 2183 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 2184 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 || 2185 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 || 2186 strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 || 2187 strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 || 2188 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 2189 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 || 2190 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 2191 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 || 2192 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 || 2193 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 2194 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 2195 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 2196 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 2197 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 2198 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 2199 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 2200 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 || 2201 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 || 2202 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 2203 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 2204 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 2205 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 2206 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 2207 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 2208 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 2209 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2210 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 || 2211 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || 2212 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || 2213 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 || 2214 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0) 2215 ))) { 2216 call->dump(); 2217 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 2218 } 2219 #endif 2220 // Always process arraycopy's destination object since 2221 // we need to add all possible edges to references in 2222 // source object. 
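// (E.g. for a hypothetical 'System.arraycopy(src, 0, dst, 0, n)' over oop
// arrays, everything reachable from src's elements becomes reachable from
// dst's elements, so dst must be processed even if it is already ArgEscape.)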
2223 if (arg_esc >= PointsToNode::ArgEscape &&
2224 !arg_is_arraycopy_dest) {
2225 continue;
2226 }
2227 PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2228 if (call->is_ArrayCopy()) {
2229 ArrayCopyNode* ac = call->as_ArrayCopy();
2230 if (ac->is_clonebasic() ||
2231 ac->is_arraycopy_validated() ||
2232 ac->is_copyof_validated() ||
2233 ac->is_copyofrange_validated()) {
2234 es = PointsToNode::NoEscape;
2235 }
2236 }
2237 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2238 if (arg_is_arraycopy_dest) {
2239 Node* src = call->in(TypeFunc::Parms);
2240 if (src->is_AddP()) {
2241 src = get_addp_base(src);
2242 }
2243 PointsToNode* src_ptn = ptnode_adr(src->_idx);
2244 assert(src_ptn != nullptr, "should be registered");
2245 if (arg_ptn != src_ptn) {
2246 // Special arraycopy edge:
2247 // A destination object's field can't have the source object
2248 // as base since objects escape states are not related.
2249 // Only escape state of destination object's fields affects
2250 // escape state of fields in source object.
2251 add_arraycopy(call, es, src_ptn, arg_ptn);
2252 }
2253 }
2254 }
2255 }
2256 break;
2257 }
2258 case Op_CallStaticJava: {
2259 // For a static call, we know exactly what method is being called.
2260 // Use bytecode estimator to record the call's escape effects
2261 #ifdef ASSERT
2262 const char* name = call->as_CallStaticJava()->_name;
2263 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2264 #endif
2265 ciMethod* meth = call->as_CallJava()->method();
2266 if ((meth != nullptr) && meth->is_boxing_method()) {
2267 break; // Boxing methods do not modify any oops.
2268 }
2269 BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2270 // fall-through if not a Java method or no analyzer information
2271 if (call_analyzer != nullptr) {
2272 PointsToNode* call_ptn = ptnode_adr(call->_idx);
2273 const TypeTuple* d = call->tf()->domain();
2274 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2275 const Type* at = d->field_at(i);
2276 int k = i - TypeFunc::Parms;
2277 Node* arg = call->in(i);
2278 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2279 if (at->isa_ptr() != nullptr &&
2280 call_analyzer->is_arg_returned(k)) {
2281 // The call returns arguments.
2282 if (call_ptn != nullptr) { // Is call's result used?
2283 assert(call_ptn->is_LocalVar(), "node should be registered");
2284 assert(arg_ptn != nullptr, "node should be registered");
2285 add_edge(call_ptn, arg_ptn);
2286 }
2287 }
2288 if (at->isa_oopptr() != nullptr &&
2289 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2290 if (!call_analyzer->is_arg_stack(k)) {
2291 // The argument globally escapes
2292 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2293 } else {
2294 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2295 if (!call_analyzer->is_arg_local(k)) {
2296 // The argument itself doesn't escape, but any fields might
2297 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2298 }
2299 }
2300 }
2301 }
2302 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2303 // The call returns arguments.
2304 assert(call_ptn->edge_count() > 0, "sanity");
2305 if (!call_analyzer->is_return_local()) {
2306 // It also returns an unknown object.
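// (The bytecode analysis could not prove that the return value is always
// one of the arguments, so the result may additionally be any unknown
// object; this is modeled by an edge to phantom_obj.)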
2307 add_edge(call_ptn, phantom_obj);
2308 }
2309 }
2310 break;
2311 }
2312 }
2313 default: {
2314 // Fall-through here if not a Java method or no analyzer information
2315 // or some other type of call, assume the worst case: all arguments
2316 // globally escape.
2317 const TypeTuple* d = call->tf()->domain();
2318 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2319 const Type* at = d->field_at(i);
2320 if (at->isa_oopptr() != nullptr) {
2321 Node* arg = call->in(i);
2322 if (arg->is_AddP()) {
2323 arg = get_addp_base(arg);
2324 }
2325 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2326 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2327 }
2328 }
2329 }
2330 }
2331 }
2332
2333
2334 // Finish Graph construction.
2335 bool ConnectionGraph::complete_connection_graph(
2336 GrowableArray<PointsToNode*>& ptnodes_worklist,
2337 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2338 GrowableArray<JavaObjectNode*>& java_objects_worklist,
2339 GrowableArray<FieldNode*>& oop_fields_worklist) {
2340 // Normally only 1-3 passes needed to build Connection Graph depending
2341 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
2342 // Set limit to 20 to catch the situation when something went wrong and
2343 // bail out of Escape Analysis.
2344 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
2345 #define GRAPH_BUILD_ITER_LIMIT 20
2346
2347 // Propagate GlobalEscape and ArgEscape escape states and check that
2348 // we still have non-escaping objects. The method pushes onto _worklist
2349 // Field nodes which reference phantom_object.
2350 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2351 return false; // Nothing to do.
2352 }
2353 // Now propagate references to all JavaObject nodes.
2354 int java_objects_length = java_objects_worklist.length();
2355 elapsedTimer build_time;
2356 build_time.start();
2357 elapsedTimer time;
2358 bool timeout = false;
2359 int new_edges = 1;
2360 int iterations = 0;
2361 do {
2362 while ((new_edges > 0) &&
2363 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2364 double start_time = time.seconds();
2365 time.start();
2366 new_edges = 0;
2367 // Propagate references to phantom_object for nodes pushed on _worklist
2368 // by find_non_escaped_objects() and find_field_value().
2369 new_edges += add_java_object_edges(phantom_obj, false);
2370 for (int next = 0; next < java_objects_length; ++next) {
2371 JavaObjectNode* ptn = java_objects_worklist.at(next);
2372 new_edges += add_java_object_edges(ptn, true);
2373
2374 #define SAMPLE_SIZE 4
2375 if ((next % SAMPLE_SIZE) == 0) {
2376 // Every SAMPLE_SIZE iterations, estimate how much time it will
2377 // take to complete graph construction.
2378 time.stop();
2379 // Poll for requests from shutdown mechanism to quiesce compiler
2380 // because Connection graph construction may take a long time.
2381 CompileBroker::maybe_block();
2382 double stop_time = time.seconds();
2383 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2384 double time_until_end = time_per_iter * (double)(java_objects_length - next);
2385 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2386 timeout = true;
2387 break; // Timeout
2388 }
2389 start_time = stop_time;
2390 time.start();
2391 }
2392 #undef SAMPLE_SIZE
2393
2394 }
2395 if (timeout) break;
2396 if (new_edges > 0) {
2397 // Update escape states on each iteration if graph was updated.
2398 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2399 return false; // Nothing to do.
2400 }
2401 }
2402 time.stop();
2403 if (time.seconds() >= EscapeAnalysisTimeout) {
2404 timeout = true;
2405 break;
2406 }
2407 }
2408 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2409 time.start();
2410 // Find fields which have unknown value.
2411 int fields_length = oop_fields_worklist.length();
2412 for (int next = 0; next < fields_length; next++) {
2413 FieldNode* field = oop_fields_worklist.at(next);
2414 if (field->edge_count() == 0) {
2415 new_edges += find_field_value(field);
2416 // This code may have added new edges to phantom_object.
2417 // Need another cycle to propagate references to phantom_object.
2418 }
2419 }
2420 time.stop();
2421 if (time.seconds() >= EscapeAnalysisTimeout) {
2422 timeout = true;
2423 break;
2424 }
2425 } else {
2426 new_edges = 0; // Bailout
2427 }
2428 } while (new_edges > 0);
2429
2430 build_time.stop();
2431 _build_time = build_time.seconds();
2432 _build_iterations = iterations;
2433
2434 // Bailout if passed limits.
2435 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2436 Compile* C = _compile;
2437 if (C->log() != nullptr) {
2438 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2439 C->log()->text("%s", timeout ? "time" : "iterations");
2440 C->log()->end_elem(" limit'");
2441 }
2442 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2443 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2444 // Possible infinite build_connection_graph loop,
2445 // bailout (no changes to ideal graph were made).
2446 return false;
2447 }
2448
2449 #undef GRAPH_BUILD_ITER_LIMIT
2450
2451 // Find fields initialized by null for non-escaping Allocations.
2452 int non_escaped_length = non_escaped_allocs_worklist.length();
2453 for (int next = 0; next < non_escaped_length; next++) {
2454 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2455 PointsToNode::EscapeState es = ptn->escape_state();
2456 assert(es <= PointsToNode::ArgEscape, "sanity");
2457 if (es == PointsToNode::NoEscape) {
2458 if (find_init_values_null(ptn, _igvn) > 0) {
2459 // Adding references to null object does not change escape states
2460 // since it does not escape. Also no fields are added to null object.
2461 add_java_object_edges(null_obj, false);
2462 }
2463 }
2464 Node* n = ptn->ideal_node();
2465 if (n->is_Allocate()) {
2466 // The object allocated by this Allocate node will never be
2467 // seen by another thread. Mark it so that when it is
2468 // expanded no MemBarStoreStore is added.
2469 InitializeNode* ini = n->as_Allocate()->initialization();
2470 if (ini != nullptr)
2471 ini->set_does_not_escape();
2472 }
2473 }
2474 return true; // Finished graph construction.
2475 }
2476
2477 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2478 // and check that we still have non-escaping java objects.
2479 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2480 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2481 GrowableArray<PointsToNode*> escape_worklist;
2482 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
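// The propagation below is a monotone fixed-point computation; an
// illustrative sketch (pseudocode only, not the exact code):
//
//   worklist = { ptn | ptn.escape_state >= ArgEscape ||
//                      ptn.fields_escape_state >= ArgEscape }
//   while worklist is not empty:
//     ptn = worklist.pop()
//     for each edge target e of ptn:
//       if raising e's escape (or fields escape) state: worklist.push(e)
//
// Termination follows because escape states only increase.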
2483 int ptnodes_length = ptnodes_worklist.length(); 2484 for (int next = 0; next < ptnodes_length; ++next) { 2485 PointsToNode* ptn = ptnodes_worklist.at(next); 2486 if (ptn->escape_state() >= PointsToNode::ArgEscape || 2487 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 2488 escape_worklist.push(ptn); 2489 } 2490 } 2491 // Set escape states to referenced nodes (edges list). 2492 while (escape_worklist.length() > 0) { 2493 PointsToNode* ptn = escape_worklist.pop(); 2494 PointsToNode::EscapeState es = ptn->escape_state(); 2495 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 2496 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 2497 es >= PointsToNode::ArgEscape) { 2498 // GlobalEscape or ArgEscape state of field means it has unknown value. 2499 if (add_edge(ptn, phantom_obj)) { 2500 // New edge was added 2501 add_field_uses_to_worklist(ptn->as_Field()); 2502 } 2503 } 2504 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2505 PointsToNode* e = i.get(); 2506 if (e->is_Arraycopy()) { 2507 assert(ptn->arraycopy_dst(), "sanity"); 2508 // Propagate only fields escape state through arraycopy edge. 2509 if (e->fields_escape_state() < field_es) { 2510 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2511 escape_worklist.push(e); 2512 } 2513 } else if (es >= field_es) { 2514 // fields_escape_state is also set to 'es' if it is less than 'es'. 2515 if (e->escape_state() < es) { 2516 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2517 escape_worklist.push(e); 2518 } 2519 } else { 2520 // Propagate field escape state. 2521 bool es_changed = false; 2522 if (e->fields_escape_state() < field_es) { 2523 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2524 es_changed = true; 2525 } 2526 if ((e->escape_state() < field_es) && 2527 e->is_Field() && ptn->is_JavaObject() && 2528 e->as_Field()->is_oop()) { 2529 // Change escape state of referenced fields. 2530 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2531 es_changed = true; 2532 } else if (e->escape_state() < es) { 2533 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2534 es_changed = true; 2535 } 2536 if (es_changed) { 2537 escape_worklist.push(e); 2538 } 2539 } 2540 } 2541 } 2542 // Remove escaped objects from non_escaped list. 2543 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 2544 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 2545 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 2546 non_escaped_allocs_worklist.delete_at(next); 2547 } 2548 if (ptn->escape_state() == PointsToNode::NoEscape) { 2549 // Find fields in non-escaped allocations which have unknown value. 2550 find_init_values_phantom(ptn); 2551 } 2552 } 2553 return (non_escaped_allocs_worklist.length() > 0); 2554 } 2555 2556 // Add all references to JavaObject node by walking over all uses. 2557 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 2558 int new_edges = 0; 2559 if (populate_worklist) { 2560 // Populate _worklist by uses of jobj's uses. 2561 for (UseIterator i(jobj); i.has_next(); i.next()) { 2562 PointsToNode* use = i.get(); 2563 if (use->is_Arraycopy()) { 2564 continue; 2565 } 2566 add_uses_to_worklist(use); 2567 if (use->is_Field() && use->as_Field()->is_oop()) { 2568 // Put on worklist all field's uses (loads) and 2569 // related field nodes (same base and offset). 
2570 add_field_uses_to_worklist(use->as_Field());
2571 }
2572 }
2573 }
2574 for (int l = 0; l < _worklist.length(); l++) {
2575 PointsToNode* use = _worklist.at(l);
2576 if (PointsToNode::is_base_use(use)) {
2577 // Add reference from jobj to field and from field to jobj (field's base).
2578 use = PointsToNode::get_use_node(use)->as_Field();
2579 if (add_base(use->as_Field(), jobj)) {
2580 new_edges++;
2581 }
2582 continue;
2583 }
2584 assert(!use->is_JavaObject(), "sanity");
2585 if (use->is_Arraycopy()) {
2586 if (jobj == null_obj) { // null object does not have field edges
2587 continue;
2588 }
2589 // Add an edge from the Arraycopy node to the arraycopy's source java object
2590 if (add_edge(use, jobj)) {
2591 jobj->set_arraycopy_src();
2592 new_edges++;
2593 }
2594 // and stop here.
2595 continue;
2596 }
2597 if (!add_edge(use, jobj)) {
2598 continue; // No new edge added, there was such edge already.
2599 }
2600 new_edges++;
2601 if (use->is_LocalVar()) {
2602 add_uses_to_worklist(use);
2603 if (use->arraycopy_dst()) {
2604 for (EdgeIterator i(use); i.has_next(); i.next()) {
2605 PointsToNode* e = i.get();
2606 if (e->is_Arraycopy()) {
2607 if (jobj == null_obj) { // null object does not have field edges
2608 continue;
2609 }
2610 // Add edge from arraycopy's destination java object to Arraycopy node.
2611 if (add_edge(jobj, e)) {
2612 new_edges++;
2613 jobj->set_arraycopy_dst();
2614 }
2615 }
2616 }
2617 }
2618 } else {
2619 // Added a new edge from a field to the stored value.
2620 // Put on worklist all field's uses (loads) and
2621 // related field nodes (same base and offset).
2622 add_field_uses_to_worklist(use->as_Field());
2623 }
2624 }
2625 _worklist.clear();
2626 _in_worklist.reset();
2627 return new_edges;
2628 }
2629
2630 // Put on worklist all related field nodes.
2631 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2632 assert(field->is_oop(), "sanity");
2633 int offset = field->offset();
2634 add_uses_to_worklist(field);
2635 // Loop over all bases of this field and push on worklist Field nodes
2636 // with the same offset and base (since they may reference the same field).
2637 for (BaseIterator i(field); i.has_next(); i.next()) {
2638 PointsToNode* base = i.get();
2639 add_fields_to_worklist(field, base);
2640 // Check if the base was source object of arraycopy and go over arraycopy's
2641 // destination objects since values stored to a field of source object are
2642 // accessible by uses (loads) of fields of destination objects.
2643 if (base->arraycopy_src()) {
2644 for (UseIterator j(base); j.has_next(); j.next()) {
2645 PointsToNode* arycp = j.get();
2646 if (arycp->is_Arraycopy()) {
2647 for (UseIterator k(arycp); k.has_next(); k.next()) {
2648 PointsToNode* abase = k.get();
2649 if (abase->arraycopy_dst() && abase != base) {
2650 // Look for the same arraycopy reference.
2651 add_fields_to_worklist(field, abase);
2652 }
2653 }
2654 }
2655 }
2656 }
2657 }
2658 }
2659
2660 // Put on worklist all related field nodes.
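// Two Field nodes with the same base and offset may name the same memory
// location. E.g. in a hypothetical snippet:
//
//   p.f = q;        // Field(base of p, offset of f) - store
//   Object x = p.f; // another Field node with the same base and offset - load
//
// A value recorded through one node must become visible through the other,
// which is why related field nodes are pushed together.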
2661 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2662 int offset = field->offset();
2663 if (base->is_LocalVar()) {
2664 for (UseIterator j(base); j.has_next(); j.next()) {
2665 PointsToNode* f = j.get();
2666 if (PointsToNode::is_base_use(f)) { // Field
2667 f = PointsToNode::get_use_node(f);
2668 if (f == field || !f->as_Field()->is_oop()) {
2669 continue;
2670 }
2671 int offs = f->as_Field()->offset();
2672 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2673 add_to_worklist(f);
2674 }
2675 }
2676 }
2677 } else {
2678 assert(base->is_JavaObject(), "sanity");
2679 if (// Skip phantom_object since it is only used to indicate that
2680 // this field's content globally escapes.
2681 (base != phantom_obj) &&
2682 // null object node does not have fields.
2683 (base != null_obj)) {
2684 for (EdgeIterator i(base); i.has_next(); i.next()) {
2685 PointsToNode* f = i.get();
2686 // Skip arraycopy edge since store to destination object field
2687 // does not update value in source object field.
2688 if (f->is_Arraycopy()) {
2689 assert(base->arraycopy_dst(), "sanity");
2690 continue;
2691 }
2692 if (f == field || !f->as_Field()->is_oop()) {
2693 continue;
2694 }
2695 int offs = f->as_Field()->offset();
2696 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2697 add_to_worklist(f);
2698 }
2699 }
2700 }
2701 }
2702 }
2703
2704 // Find fields which have unknown value.
2705 int ConnectionGraph::find_field_value(FieldNode* field) {
2706 // Escaped fields should have init value already.
2707 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2708 int new_edges = 0;
2709 for (BaseIterator i(field); i.has_next(); i.next()) {
2710 PointsToNode* base = i.get();
2711 if (base->is_JavaObject()) {
2712 // Skip Allocate's fields which will be processed later.
2713 if (base->ideal_node()->is_Allocate()) {
2714 return 0;
2715 }
2716 assert(base == null_obj, "only null ptr base expected here");
2717 }
2718 }
2719 if (add_edge(field, phantom_obj)) {
2720 // New edge was added
2721 new_edges++;
2722 add_field_uses_to_worklist(field);
2723 }
2724 return new_edges;
2725 }
2726
2727 // Find fields initializing values for allocations.
2728 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2729 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2730 Node* alloc = pta->ideal_node();
2731
2732 // Do nothing for Allocate nodes since their field values are
2733 // "known" unless they are initialized by arraycopy/clone.
2734 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2735 return 0;
2736 }
2737 assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2738 #ifdef ASSERT
2739 if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2740 const char* name = alloc->as_CallStaticJava()->_name;
2741 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2742 }
2743 #endif
2744 // Non-escaped allocations returned from Java or runtime calls have unknown values in their fields.
2745 int new_edges = 0;
2746 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2747 PointsToNode* field = i.get();
2748 if (field->is_Field() && field->as_Field()->is_oop()) {
2749 if (add_edge(field, phantom_obj)) {
2750 // New edge was added
2751 new_edges++;
2752 add_field_uses_to_worklist(field->as_Field());
2753 }
2754 }
2755 }
2756 return new_edges;
2757 }
2758
2759 // Find fields initializing values for allocations.
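// E.g. (illustration only): after a hypothetical 'A a = new A();' a field
// never assigned in the constructor still reads as null. The Initialize
// node does not record that default store, so the null edge has to be
// added here explicitly; otherwise loads of such fields would appear to
// have no value at all.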
2760 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2761 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2762 Node* alloc = pta->ideal_node();
2763 // Do nothing for Call nodes since their field values are unknown.
2764 if (!alloc->is_Allocate()) {
2765 return 0;
2766 }
2767 InitializeNode* ini = alloc->as_Allocate()->initialization();
2768 bool visited_bottom_offset = false;
2769 GrowableArray<int> offsets_worklist;
2770 int new_edges = 0;
2771
2772 // Check if an oop field's initializing value is recorded and add
2773 // a corresponding null value if it is not recorded.
2774 // Connection Graph does not record a default initialization by null
2775 // captured by Initialize node.
2776 //
2777 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2778 PointsToNode* field = i.get(); // Field (AddP)
2779 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2780 continue; // Not oop field
2781 }
2782 int offset = field->as_Field()->offset();
2783 if (offset == Type::OffsetBot) {
2784 if (!visited_bottom_offset) {
2785 // OffsetBot is used to reference array's element,
2786 // always add reference to null to all Field nodes since we don't
2787 // know which element is referenced.
2788 if (add_edge(field, null_obj)) {
2789 // New edge was added
2790 new_edges++;
2791 add_field_uses_to_worklist(field->as_Field());
2792 visited_bottom_offset = true;
2793 }
2794 }
2795 } else {
2796 // Check only oop fields.
2797 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2798 if (adr_type->isa_rawptr()) {
2799 #ifdef ASSERT
2800 // Raw pointers are used for initializing stores so skip it
2801 // since it should be recorded already
2802 Node* base = get_addp_base(field->ideal_node());
2803 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2804 #endif
2805 continue;
2806 }
2807 if (!offsets_worklist.contains(offset)) {
2808 offsets_worklist.append(offset);
2809 Node* value = nullptr;
2810 if (ini != nullptr) {
2811 // StoreP::memory_type() == T_ADDRESS
2812 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2813 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2814 // Make sure initializing store has the same type as this AddP.
2815 // This AddP may reference a non-existing field because it is on a
2816 // dead branch of a bimorphic call which is not eliminated yet.
2817 if (store != nullptr && store->is_Store() &&
2818 store->as_Store()->memory_type() == ft) {
2819 value = store->in(MemNode::ValueIn);
2820 #ifdef ASSERT
2821 if (VerifyConnectionGraph) {
2822 // Verify that AddP already points to all objects the value points to.
2823 PointsToNode* val = ptnode_adr(value->_idx);
2824 assert((val != nullptr), "should be processed already");
2825 PointsToNode* missed_obj = nullptr;
2826 if (val->is_JavaObject()) {
2827 if (!field->points_to(val->as_JavaObject())) {
2828 missed_obj = val;
2829 }
2830 } else {
2831 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2832 tty->print_cr("----------init store has invalid value -----");
2833 store->dump();
2834 val->dump();
2835 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2836 }
2837 for (EdgeIterator j(val); j.has_next(); j.next()) {
2838 PointsToNode* obj = j.get();
2839 if (obj->is_JavaObject()) {
2840 if (!field->points_to(obj->as_JavaObject())) {
2841 missed_obj = obj;
2842 break;
2843 }
2844 }
2845 }
2846 }
2847 if (missed_obj != nullptr) {
2848 tty->print_cr("----------field---------------------------------");
2849 field->dump();
2850 tty->print_cr("----------missed reference to object-----------");
2851 missed_obj->dump();
2852 tty->print_cr("----------object referenced by init store -----");
2853 store->dump();
2854 val->dump();
2855 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2856 }
2857 }
2858 #endif
2859 } else {
2860 // There could be initializing stores which follow allocation.
2861 // For example, a volatile field store is not collected
2862 // by Initialize node.
2863 //
2864 // Need to check for dependent loads to separate such stores from
2865 // stores which follow loads. For now, add initial value null so
2866 // that compare pointers optimization works correctly.
2867 }
2868 }
2869 if (value == nullptr) {
2870 // A field's initializing value was not recorded. Add null.
2871 if (add_edge(field, null_obj)) {
2872 // New edge was added
2873 new_edges++;
2874 add_field_uses_to_worklist(field->as_Field());
2875 }
2876 }
2877 }
2878 }
2879 }
2880 return new_edges;
2881 }
2882
2883 // Adjust scalar_replaceable state after Connection Graph is built.
2884 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2885 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2886 // returns true. If one of the constraints in this method sets 'jobj' to NSR
2887 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2888 // input, 'adjust_scalar_replaceable_state' will eventually be called with
2889 // that other object and the Phi will become a reducible Phi.
2890 // There could be multiple merges involving the same jobj.
2891 Unique_Node_List candidates;
2892
2893 // Search for non-escaping objects which are not scalar replaceable
2894 // and mark them to propagate the state to referenced objects.
2895
2896 for (UseIterator i(jobj); i.has_next(); i.next()) {
2897 PointsToNode* use = i.get();
2898 if (use->is_Arraycopy()) {
2899 continue;
2900 }
2901 if (use->is_Field()) {
2902 FieldNode* field = use->as_Field();
2903 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2904 // 1. An object is not scalar replaceable if the field into which it is
2905 // stored has unknown offset (stored into unknown element of an array).
2906 if (field->offset() == Type::OffsetBot) {
2907 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2908 return;
2909 }
2910 for (BaseIterator i(field); i.has_next(); i.next()) {
2911 PointsToNode* base = i.get();
2912 // 2.
An object is not scalar replaceable if the field into which it is
2913 // stored has multiple bases one of which is null.
2914 if ((base == null_obj) && (field->base_count() > 1)) {
2915 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2916 return;
2917 }
2918 // 2.5. An object is not scalar replaceable if the field into which it is
2919 // stored has NSR base.
2920 if (!base->scalar_replaceable()) {
2921 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2922 return;
2923 }
2924 }
2925 }
2926 assert(use->is_Field() || use->is_LocalVar(), "sanity");
2927 // 3. An object is not scalar replaceable if it is merged with other objects
2928 // and we can't remove the merge.
2929 for (EdgeIterator j(use); j.has_next(); j.next()) {
2930 PointsToNode* ptn = j.get();
2931 if (ptn->is_JavaObject() && ptn != jobj) {
2932 Node* use_n = use->ideal_node();
2933
2934 // These other local vars may point to multiple objects through a Phi.
2935 // In this case we skip them and see if we can reduce the Phi.
2936 if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
2937 use_n = use_n->in(1);
2938 }
2939
2940 // If it's already a candidate or a confirmed reducible merge we can skip verification.
2941 if (candidates.member(use_n) || reducible_merges.member(use_n)) {
2942 continue;
2943 }
2944
2945 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2946 candidates.push(use_n);
2947 } else {
2948 // Mark all objects as NSR if we can't remove the merge.
2949 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2950 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2951 }
2952 }
2953 }
2954 if (!jobj->scalar_replaceable()) {
2955 return;
2956 }
2957 }
2958
2959 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2960 if (j.get()->is_Arraycopy()) {
2961 continue;
2962 }
2963
2964 // Non-escaping object node should point only to field nodes.
2965 FieldNode* field = j.get()->as_Field();
2966 int offset = field->as_Field()->offset();
2967
2968 // 4. An object is not scalar replaceable if it has a field with unknown
2969 // offset (array's element is accessed in loop).
2970 if (offset == Type::OffsetBot) {
2971 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2972 return;
2973 }
2974 // 5. Currently an object is not scalar replaceable if a LoadStore node
2975 // accesses its field since the field value is unknown after it.
2976 //
2977 Node* n = field->ideal_node();
2978
2979 // Test for an unsafe access that was parsed as maybe off heap
2980 // (with a CheckCastPP to raw memory).
2981 assert(n->is_AddP(), "expect an address computation");
2982 if (n->in(AddPNode::Base)->is_top() &&
2983 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2984 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2985 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2986 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2987 return;
2988 }
2989
2990 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2991 Node* u = n->fast_out(i);
2992 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2993 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2994 return;
2995 }
2996 }
2997
2998 // 6.
Or the address may point to more than one object. This may produce
2999 // a false positive result (set not scalar replaceable)
3000 // since the flow-insensitive escape analysis can't separate
3001 // the case when stores overwrite the field's value from the case
3002 // when stores happened on different control branches.
3003 //
3004 // Note: it will disable scalar replacement in some cases:
3005 //
3006 // Point p[] = new Point[1];
3007 // p[0] = new Point(); // Will not be scalar replaced
3008 //
3009 // but it will save us from incorrect optimizations in cases like the following:
3010 //
3011 // Point p[] = new Point[1];
3012 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
3013 //
3014 if (field->base_count() > 1 && candidates.size() == 0) {
3015 if (has_non_reducible_merge(field, reducible_merges)) {
3016 for (BaseIterator i(field); i.has_next(); i.next()) {
3017 PointsToNode* base = i.get();
3018 // Don't take into account LocalVar nodes which
3019 // may point to only one object which should also be
3020 // this field's base by now.
3021 if (base->is_JavaObject() && base != jobj) {
3022 // Mark all bases.
3023 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3024 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3025 }
3026 }
3027
3028 if (!jobj->scalar_replaceable()) {
3029 return;
3030 }
3031 }
3032 }
3033 }
3034
3035 // The candidate is truly a reducible merge only if none of the other
3036 // constraints ruled it as NSR. There could be multiple merges involving the
3037 // same jobj.
3038 assert(jobj->scalar_replaceable(), "sanity");
3039 for (uint i = 0; i < candidates.size(); i++ ) {
3040 Node* candidate = candidates.at(i);
3041 reducible_merges.push(candidate);
3042 }
3043 }
3044
3045 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3046 for (BaseIterator i(field); i.has_next(); i.next()) {
3047 Node* base = i.get()->ideal_node();
3048 if (base->is_Phi() && !reducible_merges.member(base)) {
3049 return true;
3050 }
3051 }
3052 return false;
3053 }
3054
3055 // Propagate NSR (Not scalar replaceable) state.
3056 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
3057 int jobj_length = jobj_worklist.length();
3058 bool found_nsr_alloc = true;
3059 while (found_nsr_alloc) {
3060 found_nsr_alloc = false;
3061 for (int next = 0; next < jobj_length; ++next) {
3062 JavaObjectNode* jobj = jobj_worklist.at(next);
3063 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3064 PointsToNode* use = i.get();
3065 if (use->is_Field()) {
3066 FieldNode* field = use->as_Field();
3067 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3068 assert(field->offset() != Type::OffsetBot, "sanity");
3069 for (BaseIterator i(field); i.has_next(); i.next()) {
3070 PointsToNode* base = i.get();
3071 // An object is not scalar replaceable if the field into which
3072 // it is stored has NSR base.
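// (E.g. in a hypothetical 'a = new A(); b = new B(); b.f = a;' where 'b'
// is NSR: 'b' will be materialized, so the real field b.f holds 'a' and
// 'a' cannot be scalar replaced either; the enclosing loop repeats until
// this propagation reaches a fixed point.)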
3073 if ((base != null_obj) && !base->scalar_replaceable()) { 3074 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 3075 found_nsr_alloc = true; 3076 break; 3077 } 3078 } 3079 } 3080 } 3081 } 3082 } 3083 } 3084 3085 #ifdef ASSERT 3086 void ConnectionGraph::verify_connection_graph( 3087 GrowableArray<PointsToNode*>& ptnodes_worklist, 3088 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 3089 GrowableArray<JavaObjectNode*>& java_objects_worklist, 3090 GrowableArray<Node*>& addp_worklist) { 3091 // Verify that graph is complete - no new edges could be added. 3092 int java_objects_length = java_objects_worklist.length(); 3093 int non_escaped_length = non_escaped_allocs_worklist.length(); 3094 int new_edges = 0; 3095 for (int next = 0; next < java_objects_length; ++next) { 3096 JavaObjectNode* ptn = java_objects_worklist.at(next); 3097 new_edges += add_java_object_edges(ptn, true); 3098 } 3099 assert(new_edges == 0, "graph was not complete"); 3100 // Verify that escape state is final. 3101 int length = non_escaped_allocs_worklist.length(); 3102 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 3103 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 3104 (non_escaped_length == length) && 3105 (_worklist.length() == 0), "escape state was not final"); 3106 3107 // Verify fields information. 3108 int addp_length = addp_worklist.length(); 3109 for (int next = 0; next < addp_length; ++next ) { 3110 Node* n = addp_worklist.at(next); 3111 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 3112 if (field->is_oop()) { 3113 // Verify that field has all bases 3114 Node* base = get_addp_base(n); 3115 PointsToNode* ptn = ptnode_adr(base->_idx); 3116 if (ptn->is_JavaObject()) { 3117 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 3118 } else { 3119 assert(ptn->is_LocalVar(), "sanity"); 3120 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3121 PointsToNode* e = i.get(); 3122 if (e->is_JavaObject()) { 3123 assert(field->has_base(e->as_JavaObject()), "sanity"); 3124 } 3125 } 3126 } 3127 // Verify that all fields have initializing values. 3128 if (field->edge_count() == 0) { 3129 tty->print_cr("----------field does not have references----------"); 3130 field->dump(); 3131 for (BaseIterator i(field); i.has_next(); i.next()) { 3132 PointsToNode* base = i.get(); 3133 tty->print_cr("----------field has next base---------------------"); 3134 base->dump(); 3135 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 3136 tty->print_cr("----------base has fields-------------------------"); 3137 for (EdgeIterator j(base); j.has_next(); j.next()) { 3138 j.get()->dump(); 3139 } 3140 tty->print_cr("----------base has references---------------------"); 3141 for (UseIterator j(base); j.has_next(); j.next()) { 3142 j.get()->dump(); 3143 } 3144 } 3145 } 3146 for (UseIterator i(field); i.has_next(); i.next()) { 3147 i.get()->dump(); 3148 } 3149 assert(field->edge_count() > 0, "sanity"); 3150 } 3151 } 3152 } 3153 } 3154 #endif 3155 3156 // Optimize ideal graph. 3157 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 3158 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 3159 Compile* C = _compile; 3160 PhaseIterGVN* igvn = _igvn; 3161 if (EliminateLocks) { 3162 // Mark locks before changing ideal graph. 
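// (Illustrative example: locking on an object that EA proves to be
//  thread-local, e.g.
//      synchronized (new Object()) { counter++; }
//  allows the corresponding Lock/Unlock nodes to be marked for
//  elimination below.)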
3163 int cnt = C->macro_count();
3164 for (int i = 0; i < cnt; i++) {
3165 Node *n = C->macro_node(i);
3166 if (n->is_AbstractLock()) { // Lock and Unlock nodes
3167 AbstractLockNode* alock = n->as_AbstractLock();
3168 if (!alock->is_non_esc_obj()) {
3169 if (can_eliminate_lock(alock)) {
3170 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3171 // The lock could be marked eliminated by the lock coarsening
3172 // code during the first IGVN before EA. Replace the coarsened
3173 // marking so that all associated locks/unlocks are eliminated.
3174 #ifdef ASSERT
3175 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3176 #endif
3177 alock->set_non_esc_obj();
3178 }
3179 }
3180 }
3181 }
3182 }
3183
3184 if (OptimizePtrCompare) {
3185 for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3186 Node *n = ptr_cmp_worklist.at(i);
3187 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3188 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3189 if (tcmp->singleton()) {
3190 Node* cmp = igvn->makecon(tcmp);
3191 #ifndef PRODUCT
3192 if (PrintOptimizePtrCompare) {
3193 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3194 if (Verbose) {
3195 n->dump(1);
3196 }
3197 }
3198 #endif
3199 igvn->replace_node(n, cmp);
3200 }
3201 }
3202 }
3203
3204 // For MemBarStoreStore nodes added in library_call.cpp, check the
3205 // escape status of the associated AllocateNode and optimize out the
3206 // MemBarStoreStore node if the allocated object never escapes.
3207 for (int i = 0; i < storestore_worklist.length(); i++) {
3208 Node* storestore = storestore_worklist.at(i);
3209 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3210 if (alloc->is_Allocate() && not_global_escape(alloc)) {
3211 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3212 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
3213 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3214 igvn->register_new_node_with_optimizer(mb);
3215 igvn->replace_node(storestore, mb);
3216 }
3217 }
3218 }
3219
3220 // Optimize object compares.
3221 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3222 assert(OptimizePtrCompare, "sanity");
3223 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3224 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3225 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
3226
3227 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3228 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3229 JavaObjectNode* jobj1 = unique_java_object(left);
3230 JavaObjectNode* jobj2 = unique_java_object(right);
3231
3232 // The use of this method during allocation merge reduction may cause 'left'
3233 // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3234 // or that doesn't reference a unique java object.
3235 if (ptn1 == nullptr || ptn2 == nullptr ||
3236 jobj1 == nullptr || jobj2 == nullptr) {
3237 return UNKNOWN;
3238 }
3239
3240 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3241 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3242
3243 // Check simple cases first.
3244 if (jobj1 != nullptr) {
3245 if (jobj1->escape_state() == PointsToNode::NoEscape) {
3246 if (jobj1 == jobj2) {
3247 // Comparing the same not escaping object.
3248 return EQ;
3249 }
3250 Node* obj = jobj1->ideal_node();
3251 // Comparing not escaping allocation.
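// (A non-escaping allocation is a fresh object: no other reference can
//  alias it, so if the other side's points-to set does not contain it,
//  the comparison below must be "not equal". This also covers a
//  comparison against null.)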
3252 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 3253 !ptn2->points_to(jobj1)) { 3254 return NE; // This includes nullness check. 3255 } 3256 } 3257 } 3258 if (jobj2 != nullptr) { 3259 if (jobj2->escape_state() == PointsToNode::NoEscape) { 3260 Node* obj = jobj2->ideal_node(); 3261 // Comparing not escaping allocation. 3262 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 3263 !ptn1->points_to(jobj2)) { 3264 return NE; // This includes nullness check. 3265 } 3266 } 3267 } 3268 if (jobj1 != nullptr && jobj1 != phantom_obj && 3269 jobj2 != nullptr && jobj2 != phantom_obj && 3270 jobj1->ideal_node()->is_Con() && 3271 jobj2->ideal_node()->is_Con()) { 3272 // Klass or String constants compare. Need to be careful with 3273 // compressed pointers - compare types of ConN and ConP instead of nodes. 3274 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 3275 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 3276 if (t1->make_ptr() == t2->make_ptr()) { 3277 return EQ; 3278 } else { 3279 return NE; 3280 } 3281 } 3282 if (ptn1->meet(ptn2)) { 3283 return UNKNOWN; // Sets are not disjoint 3284 } 3285 3286 // Sets are disjoint. 3287 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 3288 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 3289 bool set1_has_null_ptr = ptn1->points_to(null_obj); 3290 bool set2_has_null_ptr = ptn2->points_to(null_obj); 3291 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 3292 (set2_has_unknown_ptr && set1_has_null_ptr)) { 3293 // Check nullness of unknown object. 3294 return UNKNOWN; 3295 } 3296 3297 // Disjointness by itself is not sufficient since 3298 // alias analysis is not complete for escaped objects. 3299 // Disjoint sets are definitely unrelated only when 3300 // at least one set has only not escaping allocations. 3301 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 3302 if (ptn1->non_escaping_allocation()) { 3303 return NE; 3304 } 3305 } 3306 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 3307 if (ptn2->non_escaping_allocation()) { 3308 return NE; 3309 } 3310 } 3311 return UNKNOWN; 3312 } 3313 3314 // Connection Graph construction functions. 
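// For a Java snippet like (illustrative sketch):
//
//   Point p = new Point();   // JavaObject node for the allocation
//   Point q = p;             // LocalVar node for the local 'q'
//   q.next = new Point();    // Field node for the AddP of 'next'
//
// the functions below create one PointsToNode per relevant ideal node:
// JavaObject nodes for allocations and other oop-producing nodes,
// LocalVar nodes for oop-holding locals, Field nodes for AddP-based
// accesses and Arraycopy nodes for ArrayCopy-like intrinsics.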
3315 3316 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 3317 PointsToNode* ptadr = _nodes.at(n->_idx); 3318 if (ptadr != nullptr) { 3319 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 3320 return; 3321 } 3322 Compile* C = _compile; 3323 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 3324 map_ideal_node(n, ptadr); 3325 } 3326 3327 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 3328 PointsToNode* ptadr = _nodes.at(n->_idx); 3329 if (ptadr != nullptr) { 3330 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 3331 return ptadr; 3332 } 3333 Compile* C = _compile; 3334 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 3335 map_ideal_node(n, ptadr); 3336 return ptadr; 3337 } 3338 3339 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 3340 PointsToNode* ptadr = _nodes.at(n->_idx); 3341 if (ptadr != nullptr) { 3342 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 3343 return; 3344 } 3345 bool unsafe = false; 3346 bool is_oop = is_oop_field(n, offset, &unsafe); 3347 if (unsafe) { 3348 es = PointsToNode::GlobalEscape; 3349 } 3350 Compile* C = _compile; 3351 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 3352 map_ideal_node(n, field); 3353 } 3354 3355 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 3356 PointsToNode* src, PointsToNode* dst) { 3357 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 3358 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 3359 PointsToNode* ptadr = _nodes.at(n->_idx); 3360 if (ptadr != nullptr) { 3361 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 3362 return; 3363 } 3364 Compile* C = _compile; 3365 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 3366 map_ideal_node(n, ptadr); 3367 // Add edge from arraycopy node to source object. 3368 (void)add_edge(ptadr, src); 3369 src->set_arraycopy_src(); 3370 // Add edge from destination object to arraycopy node. 3371 (void)add_edge(dst, ptadr); 3372 dst->set_arraycopy_dst(); 3373 } 3374 3375 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 3376 const Type* adr_type = n->as_AddP()->bottom_type(); 3377 BasicType bt = T_INT; 3378 if (offset == Type::OffsetBot) { 3379 // Check only oop fields. 3380 if (!adr_type->isa_aryptr() || 3381 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 3382 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 3383 // OffsetBot is used to reference array's element. Ignore first AddP. 
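// (Illustrative example: an element access with a variable index, as in
//      for (int i = 0; i < a.length; i++) { a[i] = o; }
//  computes the address with a non-constant offset, so it is typed
//  with OffsetBot.)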
3384 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 3385 bt = T_OBJECT; 3386 } 3387 } 3388 } else if (offset != oopDesc::klass_offset_in_bytes()) { 3389 if (adr_type->isa_instptr()) { 3390 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 3391 if (field != nullptr) { 3392 bt = field->layout_type(); 3393 } else { 3394 // Check for unsafe oop field access 3395 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3396 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3397 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3398 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3399 bt = T_OBJECT; 3400 (*unsafe) = true; 3401 } 3402 } 3403 } else if (adr_type->isa_aryptr()) { 3404 if (offset == arrayOopDesc::length_offset_in_bytes()) { 3405 // Ignore array length load. 3406 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 3407 // Ignore first AddP. 3408 } else { 3409 const Type* elemtype = adr_type->isa_aryptr()->elem(); 3410 bt = elemtype->array_element_basic_type(); 3411 } 3412 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 3413 // Allocation initialization, ThreadLocal field access, unsafe access 3414 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3415 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3416 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3417 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3418 bt = T_OBJECT; 3419 } 3420 } 3421 } 3422 // Note: T_NARROWOOP is not classed as a real reference type 3423 return (is_reference_type(bt) || bt == T_NARROWOOP); 3424 } 3425 3426 // Returns unique pointed java object or null. 3427 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 3428 // If the node was created after the escape computation we can't answer. 3429 uint idx = n->_idx; 3430 if (idx >= nodes_size()) { 3431 return nullptr; 3432 } 3433 PointsToNode* ptn = ptnode_adr(idx); 3434 if (ptn == nullptr) { 3435 return nullptr; 3436 } 3437 if (ptn->is_JavaObject()) { 3438 return ptn->as_JavaObject(); 3439 } 3440 assert(ptn->is_LocalVar(), "sanity"); 3441 // Check all java objects it points to. 3442 JavaObjectNode* jobj = nullptr; 3443 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3444 PointsToNode* e = i.get(); 3445 if (e->is_JavaObject()) { 3446 if (jobj == nullptr) { 3447 jobj = e->as_JavaObject(); 3448 } else if (jobj != e) { 3449 return nullptr; 3450 } 3451 } 3452 } 3453 return jobj; 3454 } 3455 3456 // Return true if this node points only to non-escaping allocations. 3457 bool PointsToNode::non_escaping_allocation() { 3458 if (is_JavaObject()) { 3459 Node* n = ideal_node(); 3460 if (n->is_Allocate() || n->is_CallStaticJava()) { 3461 return (escape_state() == PointsToNode::NoEscape); 3462 } else { 3463 return false; 3464 } 3465 } 3466 assert(is_LocalVar(), "sanity"); 3467 // Check all java objects it points to. 
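// (E.g. after "Point p = cond ? new Point() : escapedPoint();" the
//  LocalVar for 'p' points to two JavaObjects; the escaped one makes
//  this method return false. Illustrative sketch; 'escapedPoint' is a
//  made-up name.)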
3468 for (EdgeIterator i(this); i.has_next(); i.next()) {
3469 PointsToNode* e = i.get();
3470 if (e->is_JavaObject()) {
3471 Node* n = e->ideal_node();
3472 if ((e->escape_state() != PointsToNode::NoEscape) ||
3473 !(n->is_Allocate() || n->is_CallStaticJava())) {
3474 return false;
3475 }
3476 }
3477 }
3478 return true;
3479 }
3480
3481 // Return true if we know the node does not escape globally.
3482 bool ConnectionGraph::not_global_escape(Node *n) {
3483 assert(!_collecting, "should not call during graph construction");
3484 // If the node was created after the escape computation we can't answer.
3485 uint idx = n->_idx;
3486 if (idx >= nodes_size()) {
3487 return false;
3488 }
3489 PointsToNode* ptn = ptnode_adr(idx);
3490 if (ptn == nullptr) {
3491 return false; // not in congraph (e.g. ConI)
3492 }
3493 PointsToNode::EscapeState es = ptn->escape_state();
3494 // If we have already computed a value, return it.
3495 if (es >= PointsToNode::GlobalEscape) {
3496 return false;
3497 }
3498 if (ptn->is_JavaObject()) {
3499 return true; // (es < PointsToNode::GlobalEscape);
3500 }
3501 assert(ptn->is_LocalVar(), "sanity");
3502 // Check all java objects it points to.
3503 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3504 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3505 return false;
3506 }
3507 }
3508 return true;
3509 }
3510
3511 // Return true if the locked object does not escape globally
3512 // and the locked code region (identified by BoxLockNode) is balanced:
3513 // all compiled code paths have corresponding Lock/Unlock pairs.
3514 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3515 if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3516 if (EliminateNestedLocks) {
3517 // We can mark the whole locking region as Local only when only
3518 // one object is used for locking.
3519 alock->box_node()->as_BoxLock()->set_local();
3520 }
3521 return true;
3522 }
3523 return false;
3524 }
3525
3526 // Helper functions
3527
3528 // Return true if this node points to the specified node, either directly
3529 // or through nodes it points to.
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3530 if (is_JavaObject()) {
3531 return (this == ptn);
3532 }
3533 assert(is_LocalVar() || is_Field(), "sanity");
3534 for (EdgeIterator i(this); i.has_next(); i.next()) {
3535 if (i.get() == ptn) {
3536 return true;
3537 }
3538 }
3539 return false;
3540 }
3541
3542 // Return true if one node points to another.
3543 bool PointsToNode::meet(PointsToNode* ptn) {
3544 if (this == ptn) {
3545 return true;
3546 } else if (ptn->is_JavaObject()) {
3547 return this->points_to(ptn->as_JavaObject());
3548 } else if (this->is_JavaObject()) {
3549 return ptn->points_to(this->as_JavaObject());
3550 }
3551 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3552 int ptn_count = ptn->edge_count();
3553 for (EdgeIterator i(this); i.has_next(); i.next()) {
3554 PointsToNode* this_e = i.get();
3555 for (int j = 0; j < ptn_count; j++) {
3556 if (this_e == ptn->edge(j)) {
3557 return true;
3558 }
3559 }
3560 }
3561 return false;
3562 }
3563
3564 #ifdef ASSERT
3565 // Return true if this field's bases include the given java object.
3566 bool FieldNode::has_base(JavaObjectNode* jobj) const {
3567 for (BaseIterator i(this); i.has_next(); i.next()) {
3568 if (i.get() == jobj) {
3569 return true;
3570 }
3571 }
3572 return false;
3573 }
3574 #endif
3575
3576 bool ConnectionGraph::is_captured_store_address(Node* addp) {
3577 // Handle simple case first.
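// A captured store initializes a freshly allocated object, so the raw
// address is built directly on the allocation's raw oop projection.
// Sketch of the simple shape recognized below:
//
//   Allocate
//      |
//   Proj (raw oop result)
//      |
//   AddP -> StoreX (captured by the allocation's Initialize)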
3578 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 3579 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 3580 return true; 3581 } else if (addp->in(AddPNode::Address)->is_Phi()) { 3582 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 3583 Node* addp_use = addp->fast_out(i); 3584 if (addp_use->is_Store()) { 3585 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 3586 if (addp_use->fast_out(j)->is_Initialize()) { 3587 return true; 3588 } 3589 } 3590 } 3591 } 3592 } 3593 return false; 3594 } 3595 3596 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 3597 const Type *adr_type = phase->type(adr); 3598 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 3599 // We are computing a raw address for a store captured by an Initialize 3600 // compute an appropriate address type. AddP cases #3 and #5 (see below). 3601 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 3602 assert(offs != Type::OffsetBot || 3603 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 3604 "offset must be a constant or it is initialization of array"); 3605 return offs; 3606 } 3607 const TypePtr *t_ptr = adr_type->isa_ptr(); 3608 assert(t_ptr != nullptr, "must be a pointer type"); 3609 return t_ptr->offset(); 3610 } 3611 3612 Node* ConnectionGraph::get_addp_base(Node *addp) { 3613 assert(addp->is_AddP(), "must be AddP"); 3614 // 3615 // AddP cases for Base and Address inputs: 3616 // case #1. Direct object's field reference: 3617 // Allocate 3618 // | 3619 // Proj #5 ( oop result ) 3620 // | 3621 // CheckCastPP (cast to instance type) 3622 // | | 3623 // AddP ( base == address ) 3624 // 3625 // case #2. Indirect object's field reference: 3626 // Phi 3627 // | 3628 // CastPP (cast to instance type) 3629 // | | 3630 // AddP ( base == address ) 3631 // 3632 // case #3. Raw object's field reference for Initialize node: 3633 // Allocate 3634 // | 3635 // Proj #5 ( oop result ) 3636 // top | 3637 // \ | 3638 // AddP ( base == top ) 3639 // 3640 // case #4. Array's element reference: 3641 // {CheckCastPP | CastPP} 3642 // | | | 3643 // | AddP ( array's element offset ) 3644 // | | 3645 // AddP ( array's offset ) 3646 // 3647 // case #5. Raw object's field reference for arraycopy stub call: 3648 // The inline_native_clone() case when the arraycopy stub is called 3649 // after the allocation before Initialize and CheckCastPP nodes. 3650 // Allocate 3651 // | 3652 // Proj #5 ( oop result ) 3653 // | | 3654 // AddP ( base == address ) 3655 // 3656 // case #6. Constant Pool, ThreadLocal, CastX2P or 3657 // Raw object's field reference: 3658 // {ConP, ThreadLocal, CastX2P, raw Load} 3659 // top | 3660 // \ | 3661 // AddP ( base == top ) 3662 // 3663 // case #7. Klass's field reference. 3664 // LoadKlass 3665 // | | 3666 // AddP ( base == address ) 3667 // 3668 // case #8. narrow Klass's field reference. 3669 // LoadNKlass 3670 // | 3671 // DecodeN 3672 // | | 3673 // AddP ( base == address ) 3674 // 3675 // case #9. Mixed unsafe access 3676 // {instance} 3677 // | 3678 // CheckCastPP (raw) 3679 // top | 3680 // \ | 3681 // AddP ( base == top ) 3682 // 3683 Node *base = addp->in(AddPNode::Base); 3684 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 3685 base = addp->in(AddPNode::Address); 3686 while (base->is_AddP()) { 3687 // Case #6 (unsafe access) may have several chained AddP nodes. 
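// (Illustrative sketch: an unsafe access whose offset is computed in
//  pieces, e.g. a CastX2P base plus two separately added offsets, can
//  leave a chain AddP(AddP(CastX2P, off1), off2) in the graph.)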
3688 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3689 base = base->in(AddPNode::Address);
3690 }
3691 if (base->Opcode() == Op_CheckCastPP &&
3692 base->bottom_type()->isa_rawptr() &&
3693 _igvn->type(base->in(1))->isa_oopptr()) {
3694 base = base->in(1); // Case #9
3695 } else {
3696 Node* uncast_base = base->uncast();
3697 int opcode = uncast_base->Opcode();
3698 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3699 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3700 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3701 is_captured_store_address(addp), "sanity");
3702 }
3703 }
3704 return base;
3705 }
3706
3707 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3708 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3709 Node* addp2 = addp->raw_out(0);
3710 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3711 addp2->in(AddPNode::Base) == n &&
3712 addp2->in(AddPNode::Address) == addp) {
3713 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3714 //
3715 // Find the array's offset to push it on the worklist first and
3716 // as a result process the array's element offset first (pushed second)
3717 // to avoid a CastPP for the array's offset.
3718 // Otherwise the inserted CastPP (LocalVar) will point to what
3719 // the AddP (Field) points to, which would be wrong since
3720 // the algorithm expects the CastPP to point to the same node
3721 // as the AddP's base CheckCastPP (LocalVar).
3722 //
3723 // ArrayAllocation
3724 // |
3725 // CheckCastPP
3726 // |
3727 // memProj (from ArrayAllocation CheckCastPP)
3728 // | ||
3729 // | || Int (element index)
3730 // | || | ConI (log(element size))
3731 // | || | /
3732 // | || LShift
3733 // | || /
3734 // | AddP (array's element offset)
3735 // | |
3736 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
3737 // | / /
3738 // AddP (array's offset)
3739 // |
3740 // Load/Store (memory operation on array's element)
3741 //
3742 return addp2;
3743 }
3744 return nullptr;
3745 }
3746
3747 //
3748 // Adjust the type and inputs of an AddP which computes the
3749 // address of a field of an instance.
3750 //
3751 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3752 PhaseGVN* igvn = _igvn;
3753 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3754 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3755 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3756 if (t == nullptr) {
3757 // We are computing a raw address for a store captured by an Initialize;
3758 // compute an appropriate address type (cases #3 and #5).
3759 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3760 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3761 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3762 assert(offs != Type::OffsetBot, "offset must be a constant");
3763 t = base_t->add_offset(offs)->is_oopptr();
3764 }
3765 int inst_id = base_t->instance_id();
3766 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3767 "old type must be non-instance or match new type");
3768
3769 // The type 't' could be a subclass of 'base_t'.
3770 // As a result t->offset() could be larger than base_t's size, which would
3771 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3772 // constructor verifies correctness of the offset.
3773 //
3774 // It could happen on a subclass's branch (from the type profiling
3775 // inlining) which was not eliminated during parsing since the exactness
3776 // of the allocation type was not propagated to the subclass type check.
3777 //
3778 // Or the type 't' might not be related to 'base_t' at all.
3779 // It could happen when the CHA type is different from the MDO type on a dead
3780 // path (for example, from an instanceof check) which is not collapsed during parsing.
3781 //
3782 // Do nothing for such an AddP node and don't process its users since
3783 // this code branch will go away.
3784 //
3785 if (!t->is_known_instance() &&
3786 !base_t->maybe_java_subtype_of(t)) {
3787 return false; // bail out
3788 }
3789 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3790 // Do NOT remove the next line: ensure a new alias index is allocated
3791 // for the instance type. Note: C++ will not remove it since the call
3792 // has a side effect.
3793 int alias_idx = _compile->get_alias_index(tinst);
3794 igvn->set_type(addp, tinst);
3795 // record the allocation in the node map
3796 set_map(addp, get_map(base->_idx));
3797 // Set addp's Base and Address to 'base'.
3798 Node *abase = addp->in(AddPNode::Base);
3799 Node *adr = addp->in(AddPNode::Address);
3800 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3801 adr->in(0)->_idx == (uint)inst_id) {
3802 // Skip AddP cases #3 and #5.
3803 } else {
3804 assert(!abase->is_top(), "sanity"); // AddP case #3
3805 if (abase != base) {
3806 igvn->hash_delete(addp);
3807 addp->set_req(AddPNode::Base, base);
3808 if (abase == adr) {
3809 addp->set_req(AddPNode::Address, base);
3810 } else {
3811 // AddP case #4 (adr is array's element offset AddP node)
3812 #ifdef ASSERT
3813 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3814 assert(adr->is_AddP() && atype != nullptr &&
3815 atype->instance_id() == inst_id, "array's element offset should be processed first");
3816 #endif
3817 }
3818 igvn->hash_insert(addp);
3819 }
3820 }
3821 // Put on IGVN worklist since at least addp's type was changed above.
3822 record_for_optimizer(addp);
3823 return true;
3824 }
3825
3826 //
3827 // Create a new version of orig_phi if necessary. Returns either the newly
3828 // created phi or an existing phi. Sets 'new_created' to indicate whether a new
3829 // phi was created. Cache the last newly created phi in the node map.
3830 //
3831 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
3832 Compile *C = _compile;
3833 PhaseGVN* igvn = _igvn;
3834 new_created = false;
3835 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3836 // nothing to do if orig_phi is bottom memory or matches alias_idx
3837 if (phi_alias_idx == alias_idx) {
3838 return orig_phi;
3839 }
3840 // Have we recently created a Phi for this alias index?
3841 PhiNode *result = get_map_phi(orig_phi->_idx);
3842 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3843 return result;
3844 }
3845 // Previous check may fail when the same wide memory Phi was split into Phis
3846 // for different memory slices. Search all Phis for this region.
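// (All split Phis are created by PhiNode::make() on the original Phi's
//  region, so scanning the region's outputs below finds every sibling
//  Phi that was already created for another memory slice.)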
3847 if (result != nullptr) {
3848 Node* region = orig_phi->in(0);
3849 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3850 Node* phi = region->fast_out(i);
3851 if (phi->is_Phi() &&
3852 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3853 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3854 return phi->as_Phi();
3855 }
3856 }
3857 }
3858 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3859 if (C->do_escape_analysis() == true && !C->failing()) {
3860 // Retry compilation without escape analysis.
3861 // If this is the first failure, the sentinel string will "stick"
3862 // to the Compile object, and the C2Compiler will see it and retry.
3863 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3864 }
3865 return nullptr;
3866 }
3867 orig_phi_worklist.append_if_missing(orig_phi);
3868 const TypePtr *atype = C->get_adr_type(alias_idx);
3869 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3870 C->copy_node_notes_to(result, orig_phi);
3871 igvn->set_type(result, result->bottom_type());
3872 record_for_optimizer(result);
3873 set_map(orig_phi, result);
3874 new_created = true;
3875 return result;
3876 }
3877
3878 //
3879 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3880 // specified alias index.
3881 //
3882 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
3883 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3884 Compile *C = _compile;
3885 PhaseGVN* igvn = _igvn;
3886 bool new_phi_created;
3887 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3888 if (!new_phi_created) {
3889 return result;
3890 }
3891 GrowableArray<PhiNode *> phi_list;
3892 GrowableArray<uint> cur_input;
3893 PhiNode *phi = orig_phi;
3894 uint idx = 1;
3895 bool finished = false;
3896 while (!finished) {
3897 while (idx < phi->req()) {
3898 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
3899 if (mem != nullptr && mem->is_Phi()) {
3900 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3901 if (new_phi_created) {
3902 // found a phi for which we created a new split; push the current one
3903 // on the worklist and begin processing the new one
3904 phi_list.push(phi);
3905 cur_input.push(idx);
3906 phi = mem->as_Phi();
3907 result = newphi;
3908 idx = 1;
3909 continue;
3910 } else {
3911 mem = newphi;
3912 }
3913 }
3914 if (C->failing()) {
3915 return nullptr;
3916 }
3917 result->set_req(idx++, mem);
3918 }
3919 #ifdef ASSERT
3920 // verify that the new Phi has an input for each input of the original
3921 assert(phi->req() == result->req(), "must have same number of inputs.");
3922 assert(result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3923 #endif
3924 // Check if all new phi's inputs have specified alias index.
3925 // Otherwise use old phi.
3926 for (uint i = 1; i < phi->req(); i++) { 3927 Node* in = result->in(i); 3928 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond."); 3929 } 3930 // we have finished processing a Phi, see if there are any more to do 3931 finished = (phi_list.length() == 0 ); 3932 if (!finished) { 3933 phi = phi_list.pop(); 3934 idx = cur_input.pop(); 3935 PhiNode *prev_result = get_map_phi(phi->_idx); 3936 prev_result->set_req(idx++, result); 3937 result = prev_result; 3938 } 3939 } 3940 return result; 3941 } 3942 3943 // 3944 // The next methods are derived from methods in MemNode. 3945 // 3946 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 3947 Node *mem = mmem; 3948 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 3949 // means an array I have not precisely typed yet. Do not do any 3950 // alias stuff with it any time soon. 3951 if (toop->base() != Type::AnyPtr && 3952 !(toop->isa_instptr() && 3953 toop->is_instptr()->instance_klass()->is_java_lang_Object() && 3954 toop->offset() == Type::OffsetBot)) { 3955 mem = mmem->memory_at(alias_idx); 3956 // Update input if it is progress over what we have now 3957 } 3958 return mem; 3959 } 3960 3961 // 3962 // Move memory users to their memory slices. 3963 // 3964 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 3965 Compile* C = _compile; 3966 PhaseGVN* igvn = _igvn; 3967 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 3968 assert(tp != nullptr, "ptr type"); 3969 int alias_idx = C->get_alias_index(tp); 3970 int general_idx = C->get_general_index(alias_idx); 3971 3972 // Move users first 3973 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3974 Node* use = n->fast_out(i); 3975 if (use->is_MergeMem()) { 3976 MergeMemNode* mmem = use->as_MergeMem(); 3977 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 3978 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 3979 continue; // Nothing to do 3980 } 3981 // Replace previous general reference to mem node. 3982 uint orig_uniq = C->unique(); 3983 Node* m = find_inst_mem(n, general_idx, orig_phis); 3984 assert(orig_uniq == C->unique(), "no new nodes"); 3985 mmem->set_memory_at(general_idx, m); 3986 --imax; 3987 --i; 3988 } else if (use->is_MemBar()) { 3989 assert(!use->is_Initialize(), "initializing stores should not be moved"); 3990 if (use->req() > MemBarNode::Precedent && 3991 use->in(MemBarNode::Precedent) == n) { 3992 // Don't move related membars. 3993 record_for_optimizer(use); 3994 continue; 3995 } 3996 tp = use->as_MemBar()->adr_type()->isa_ptr(); 3997 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 3998 alias_idx == general_idx) { 3999 continue; // Nothing to do 4000 } 4001 // Move to general memory slice. 4002 uint orig_uniq = C->unique(); 4003 Node* m = find_inst_mem(n, general_idx, orig_phis); 4004 assert(orig_uniq == C->unique(), "no new nodes"); 4005 igvn->hash_delete(use); 4006 imax -= use->replace_edge(n, m, igvn); 4007 igvn->hash_insert(use); 4008 record_for_optimizer(use); 4009 --i; 4010 #ifdef ASSERT 4011 } else if (use->is_Mem()) { 4012 // Memory nodes should have new memory input. 
4013 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4014 assert(tp != nullptr, "ptr type");
4015 int idx = C->get_alias_index(tp);
4016 assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4017 "Following memory nodes should have new memory input or be on the same memory slice");
4018 } else if (use->is_Phi()) {
4019 // Phi nodes should be split and moved already.
4020 tp = use->as_Phi()->adr_type()->isa_ptr();
4021 assert(tp != nullptr, "ptr type");
4022 int idx = C->get_alias_index(tp);
4023 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4024 } else {
4025 use->dump();
4026 assert(false, "should not be here");
4027 #endif
4028 }
4029 }
4030 }
4031
4032 //
4033 // Search the memory chain of "mem" to find a MemNode whose address
4034 // type matches the specified alias index.
4035 //
4036 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
4037 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, uint rec_depth) {
4038 if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4039 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4040 return nullptr;
4041 }
4042 if (orig_mem == nullptr) {
4043 return orig_mem;
4044 }
4045 Compile* C = _compile;
4046 PhaseGVN* igvn = _igvn;
4047 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4048 bool is_instance = (toop != nullptr) && toop->is_known_instance();
4049 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4050 Node *prev = nullptr;
4051 Node *result = orig_mem;
4052 while (prev != result) {
4053 prev = result;
4054 if (result == start_mem) {
4055 break; // hit one of our sentinels
4056 }
4057 if (result->is_Mem()) {
4058 const Type *at = igvn->type(result->in(MemNode::Address));
4059 if (at == Type::TOP) {
4060 break; // Dead
4061 }
4062 assert (at->isa_ptr() != nullptr, "pointer type required.");
4063 int idx = C->get_alias_index(at->is_ptr());
4064 if (idx == alias_idx) {
4065 break; // Found
4066 }
4067 if (!is_instance && (at->isa_oopptr() == nullptr ||
4068 !at->is_oopptr()->is_known_instance())) {
4069 break; // Do not skip store to general memory slice.
4070 }
4071 result = result->in(MemNode::Memory);
4072 }
4073 if (!is_instance) {
4074 continue; // don't search further for non-instance types
4075 }
4076 // skip over a call which does not affect this memory slice
4077 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4078 Node *proj_in = result->in(0);
4079 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4080 break; // hit one of our sentinels
4081 } else if (proj_in->is_Call()) {
4082 // ArrayCopy node processed here as well
4083 CallNode *call = proj_in->as_Call();
4084 if (!call->may_modify(toop, igvn)) {
4085 result = call->in(TypeFunc::Memory);
4086 }
4087 } else if (proj_in->is_Initialize()) {
4088 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4089 // Stop if this is the initialization for the object instance which
4090 // contains this memory slice, otherwise skip over it.
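// (The Initialize of a different, unrelated instance cannot modify this
//  instance's private memory slice, so it is safe to look past it.)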
4091 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { 4092 result = proj_in->in(TypeFunc::Memory); 4093 } 4094 } else if (proj_in->is_MemBar()) { 4095 // Check if there is an array copy for a clone 4096 // Step over GC barrier when ReduceInitialCardMarks is disabled 4097 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4098 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 4099 4100 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 4101 // Stop if it is a clone 4102 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 4103 if (ac->may_modify(toop, igvn)) { 4104 break; 4105 } 4106 } 4107 result = proj_in->in(TypeFunc::Memory); 4108 } 4109 } else if (result->is_MergeMem()) { 4110 MergeMemNode *mmem = result->as_MergeMem(); 4111 result = step_through_mergemem(mmem, alias_idx, toop); 4112 if (result == mmem->base_memory()) { 4113 // Didn't find instance memory, search through general slice recursively. 4114 result = mmem->memory_at(C->get_general_index(alias_idx)); 4115 result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1); 4116 if (C->failing()) { 4117 return nullptr; 4118 } 4119 mmem->set_memory_at(alias_idx, result); 4120 } 4121 } else if (result->is_Phi() && 4122 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 4123 Node *un = result->as_Phi()->unique_input(igvn); 4124 if (un != nullptr) { 4125 orig_phis.append_if_missing(result->as_Phi()); 4126 result = un; 4127 } else { 4128 break; 4129 } 4130 } else if (result->is_ClearArray()) { 4131 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 4132 // Can not bypass initialization of the instance 4133 // we are looking for. 4134 break; 4135 } 4136 // Otherwise skip it (the call updated 'result' value). 
4137 } else if (result->Opcode() == Op_SCMemProj) {
4138 Node* mem = result->in(0);
4139 Node* adr = nullptr;
4140 if (mem->is_LoadStore()) {
4141 adr = mem->in(MemNode::Address);
4142 } else {
4143 assert(mem->Opcode() == Op_EncodeISOArray ||
4144 mem->Opcode() == Op_StrCompressedCopy, "sanity");
4145 adr = mem->in(3); // Memory edge corresponds to destination array
4146 }
4147 const Type *at = igvn->type(adr);
4148 if (at != Type::TOP) {
4149 assert(at->isa_ptr() != nullptr, "pointer type required.");
4150 int idx = C->get_alias_index(at->is_ptr());
4151 if (idx == alias_idx) {
4152 // Assert in debug mode
4153 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4154 break; // In product mode return SCMemProj node
4155 }
4156 }
4157 result = mem->in(MemNode::Memory);
4158 } else if (result->Opcode() == Op_StrInflatedCopy) {
4159 Node* adr = result->in(3); // Memory edge corresponds to destination array
4160 const Type *at = igvn->type(adr);
4161 if (at != Type::TOP) {
4162 assert(at->isa_ptr() != nullptr, "pointer type required.");
4163 int idx = C->get_alias_index(at->is_ptr());
4164 if (idx == alias_idx) {
4165 // Assert in debug mode
4166 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4167 break; // In product mode return SCMemProj node
4168 }
4169 }
4170 result = result->in(MemNode::Memory);
4171 }
4172 }
4173 if (result->is_Phi()) {
4174 PhiNode *mphi = result->as_Phi();
4175 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4176 const TypePtr *t = mphi->adr_type();
4177 if (!is_instance) {
4178 // Push all non-instance Phis on the orig_phis worklist to update inputs
4179 // during Phase 4 if needed.
4180 orig_phis.append_if_missing(mphi);
4181 } else if (C->get_alias_index(t) != alias_idx) {
4182 // Create a new Phi with the specified alias index type.
4183 result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4184 }
4185 }
4186 // The result is either a MemNode, a PhiNode or an InitializeNode.
4187 return result;
4188 }
4189
4190 //
4191 // Convert the types of non-escaped objects to instance types where possible,
4192 // propagate the new type information through the graph, and update memory
4193 // edges and MergeMem inputs to reflect the new type.
4194 //
4195 // We start with allocations (and calls which may be allocations) on alloc_worklist.
4196 // The processing is done in 4 phases:
4197 //
4198 // Phase 1: Process possible allocations from alloc_worklist. Create instance
4199 // types for the CheckCastPP for allocations where possible.
4200 // Propagate the new types through users as follows:
4201 // casts and Phi: push users on alloc_worklist
4202 // AddP: cast Base and Address inputs to the instance type
4203 // push any AddP users on alloc_worklist and push any memnode
4204 // users onto memnode_worklist.
4205 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
4206 // search the Memory chain for a store with the appropriate
4207 // address type. If a Phi is found, create a new version with
4208 // the appropriate memory slices from each of the Phi inputs.
4209 // For stores, process the users as follows:
4210 // MemNode: push on memnode_worklist
4211 // MergeMem: push on mergemem_worklist
4212 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
4213 // moving the first node encountered of each instance type to
4214 // the input corresponding to its alias index.
4216 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4217 //
4218 // In the following example, the CheckCastPP nodes are the cast of allocation
4219 // results and the allocation of node 29 is non-escaped and eligible to be an
4220 // instance type.
4221 //
4222 // We start with:
4223 //
4224 // 7 Parm #memory
4225 // 10 ConI "12"
4226 // 19 CheckCastPP "Foo"
4227 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4228 // 29 CheckCastPP "Foo"
4229 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
4230 //
4231 // 40 StoreP 25 7 20 ... alias_index=4
4232 // 50 StoreP 35 40 30 ... alias_index=4
4233 // 60 StoreP 45 50 20 ... alias_index=4
4234 // 70 LoadP _ 60 30 ... alias_index=4
4235 // 80 Phi 75 50 60 Memory alias_index=4
4236 // 90 LoadP _ 80 30 ... alias_index=4
4237 // 100 LoadP _ 80 20 ... alias_index=4
4238 //
4239 //
4240 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4241 // and creating a new alias index for node 30. This gives:
4242 //
4243 // 7 Parm #memory
4244 // 10 ConI "12"
4245 // 19 CheckCastPP "Foo"
4246 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4247 // 29 CheckCastPP "Foo" iid=24
4248 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4249 //
4250 // 40 StoreP 25 7 20 ... alias_index=4
4251 // 50 StoreP 35 40 30 ... alias_index=6
4252 // 60 StoreP 45 50 20 ... alias_index=4
4253 // 70 LoadP _ 60 30 ... alias_index=6
4254 // 80 Phi 75 50 60 Memory alias_index=4
4255 // 90 LoadP _ 80 30 ... alias_index=6
4256 // 100 LoadP _ 80 20 ... alias_index=4
4257 //
4258 // In phase 2, new memory inputs are computed for the loads and stores,
4259 // and a new version of the phi is created. In phase 4, the inputs to
4260 // node 80 are updated and then the memory nodes are updated with the
4261 // values computed in phase 2. This results in:
4262 //
4263 // 7 Parm #memory
4264 // 10 ConI "12"
4265 // 19 CheckCastPP "Foo"
4266 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4267 // 29 CheckCastPP "Foo" iid=24
4268 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4269 //
4270 // 40 StoreP 25 7 20 ... alias_index=4
4271 // 50 StoreP 35 7 30 ... alias_index=6
4272 // 60 StoreP 45 40 20 ... alias_index=4
4273 // 70 LoadP _ 50 30 ... alias_index=6
4274 // 80 Phi 75 40 60 Memory alias_index=4
4275 // 120 Phi 75 50 50 Memory alias_index=6
4276 // 90 LoadP _ 80 30 ... alias_index=6
4277 // 100 LoadP _ 80 20 ... alias_index=4
4278 //
4279 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
4280 GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4281 GrowableArray<MergeMemNode*> &mergemem_worklist,
4282 Unique_Node_List &reducible_merges) {
4283 DEBUG_ONLY(Unique_Node_List reduced_merges;)
4284 GrowableArray<Node *> memnode_worklist;
4285 GrowableArray<PhiNode *> orig_phis;
4286 PhaseIterGVN *igvn = _igvn;
4287 uint new_index_start = (uint) _compile->num_alias_types();
4288 VectorSet visited;
4289 ideal_nodes.clear(); // Reset for use with set_map/get_map.
4290 uint unique_old = _compile->unique();
4291
4292 // Phase 1: Process possible allocations from alloc_worklist.
4293 // Create instance types for the CheckCastPP for allocations where possible.
4294 //
4295 // (Note: don't forget to change the order of the second AddP node on
4296 // the alloc_worklist if the order of the worklist processing is changed,
4297 // see the comment in find_second_addp().)
4298 // 4299 while (alloc_worklist.length() != 0) { 4300 Node *n = alloc_worklist.pop(); 4301 uint ni = n->_idx; 4302 if (n->is_Call()) { 4303 CallNode *alloc = n->as_Call(); 4304 // copy escape information to call node 4305 PointsToNode* ptn = ptnode_adr(alloc->_idx); 4306 PointsToNode::EscapeState es = ptn->escape_state(); 4307 // We have an allocation or call which returns a Java object, 4308 // see if it is non-escaped. 4309 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) { 4310 continue; 4311 } 4312 // Find CheckCastPP for the allocate or for the return value of a call 4313 n = alloc->result_cast(); 4314 if (n == nullptr) { // No uses except Initialize node 4315 if (alloc->is_Allocate()) { 4316 // Set the scalar_replaceable flag for allocation 4317 // so it could be eliminated if it has no uses. 4318 alloc->as_Allocate()->_is_scalar_replaceable = true; 4319 } 4320 continue; 4321 } 4322 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 4323 // we could reach here for allocate case if one init is associated with many allocs. 4324 if (alloc->is_Allocate()) { 4325 alloc->as_Allocate()->_is_scalar_replaceable = false; 4326 } 4327 continue; 4328 } 4329 4330 // The inline code for Object.clone() casts the allocation result to 4331 // java.lang.Object and then to the actual type of the allocated 4332 // object. Detect this case and use the second cast. 4333 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 4334 // the allocation result is cast to java.lang.Object and then 4335 // to the actual Array type. 4336 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 4337 && (alloc->is_AllocateArray() || 4338 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) { 4339 Node *cast2 = nullptr; 4340 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4341 Node *use = n->fast_out(i); 4342 if (use->is_CheckCastPP()) { 4343 cast2 = use; 4344 break; 4345 } 4346 } 4347 if (cast2 != nullptr) { 4348 n = cast2; 4349 } else { 4350 // Non-scalar replaceable if the allocation type is unknown statically 4351 // (reflection allocation), the object can't be restored during 4352 // deoptimization without precise type. 4353 continue; 4354 } 4355 } 4356 4357 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 4358 if (t == nullptr) { 4359 continue; // not a TypeOopPtr 4360 } 4361 if (!t->klass_is_exact()) { 4362 continue; // not an unique type 4363 } 4364 if (alloc->is_Allocate()) { 4365 // Set the scalar_replaceable flag for allocation 4366 // so it could be eliminated. 4367 alloc->as_Allocate()->_is_scalar_replaceable = true; 4368 } 4369 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state 4370 // in order for an object to be scalar-replaceable, it must be: 4371 // - a direct allocation (not a call returning an object) 4372 // - non-escaping 4373 // - eligible to be a unique type 4374 // - not determined to be ineligible by escape analysis 4375 set_map(alloc, n); 4376 set_map(n, alloc); 4377 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 4378 igvn->hash_delete(n); 4379 igvn->set_type(n, tinst); 4380 n->raise_bottom_type(tinst); 4381 igvn->hash_insert(n); 4382 record_for_optimizer(n); 4383 // Allocate an alias index for the header fields. Accesses to 4384 // the header emitted during macro expansion wouldn't have 4385 // correct memory state otherwise. 
// correct memory state otherwise.
4386 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4387 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4388 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4389
4390 // First, put on the worklist all Field edges from Connection Graph
4391 // which is more accurate than putting immediate users from Ideal Graph.
4392 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4393 PointsToNode* tgt = e.get();
4394 if (tgt->is_Arraycopy()) {
4395 continue;
4396 }
4397 Node* use = tgt->ideal_node();
4398 assert(tgt->is_Field() && use->is_AddP(),
4399 "only AddP nodes are Field edges in CG");
4400 if (use->outcnt() > 0) { // Don't process dead nodes
4401 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4402 if (addp2 != nullptr) {
4403 assert(alloc->is_AllocateArray(),"array allocation was expected");
4404 alloc_worklist.append_if_missing(addp2);
4405 }
4406 alloc_worklist.append_if_missing(use);
4407 }
4408 }
4409
4410 // An allocation may have an Initialize which has raw stores. Scan
4411 // the users of the raw allocation result and push AddP users
4412 // on alloc_worklist.
4413 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4414 assert (raw_result != nullptr, "must have an allocation result");
4415 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4416 Node *use = raw_result->fast_out(i);
4417 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4418 Node* addp2 = find_second_addp(use, raw_result);
4419 if (addp2 != nullptr) {
4420 assert(alloc->is_AllocateArray(),"array allocation was expected");
4421 alloc_worklist.append_if_missing(addp2);
4422 }
4423 alloc_worklist.append_if_missing(use);
4424 } else if (use->is_MemBar()) {
4425 memnode_worklist.append_if_missing(use);
4426 }
4427 }
4428 }
4429 } else if (n->is_AddP()) {
4430 if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4431 // This AddP will go away when we reduce the Phi.
4432 continue;
4433 }
4434 Node* addp_base = get_addp_base(n);
4435 JavaObjectNode* jobj = unique_java_object(addp_base);
4436 if (jobj == nullptr || jobj == phantom_obj) {
4437 #ifdef ASSERT
4438 ptnode_adr(get_addp_base(n)->_idx)->dump();
4439 ptnode_adr(n->_idx)->dump();
4440 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4441 #endif
4442 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4443 return;
4444 }
4445 Node *base = get_map(jobj->idx()); // CheckCastPP node
4446 if (!split_AddP(n, base)) continue; // wrong type from dead path
4447 } else if (n->is_Phi() ||
4448 n->is_CheckCastPP() ||
4449 n->is_EncodeP() ||
4450 n->is_DecodeN() ||
4451 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4452 if (visited.test_set(n->_idx)) {
4453 assert(n->is_Phi(), "loops only through Phi's");
4454 continue; // already processed
4455 }
4456 // Reducible Phi's will be removed from the graph after split_unique_types
4457 // finishes. For now we just try to split out the SR inputs of the merge.
4458 Node* parent = n->in(1);
4459 if (reducible_merges.member(n)) {
4460 reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
4461 #ifdef ASSERT
4462 if (VerifyReduceAllocationMerges) {
4463 reduced_merges.push(n);
4464 }
4465 #endif
4466 continue;
4467 } else if (reducible_merges.member(parent)) {
4468 // 'n' is a user of a reducible merge (a Phi).
It will be simplified as 4469 // part of reduce_merge. 4470 continue; 4471 } 4472 JavaObjectNode* jobj = unique_java_object(n); 4473 if (jobj == nullptr || jobj == phantom_obj) { 4474 #ifdef ASSERT 4475 ptnode_adr(n->_idx)->dump(); 4476 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); 4477 #endif 4478 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4479 return; 4480 } else { 4481 Node *val = get_map(jobj->idx()); // CheckCastPP node 4482 TypeNode *tn = n->as_Type(); 4483 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 4484 assert(tinst != nullptr && tinst->is_known_instance() && 4485 tinst->instance_id() == jobj->idx() , "instance type expected."); 4486 4487 const Type *tn_type = igvn->type(tn); 4488 const TypeOopPtr *tn_t; 4489 if (tn_type->isa_narrowoop()) { 4490 tn_t = tn_type->make_ptr()->isa_oopptr(); 4491 } else { 4492 tn_t = tn_type->isa_oopptr(); 4493 } 4494 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 4495 if (tn_type->isa_narrowoop()) { 4496 tn_type = tinst->make_narrowoop(); 4497 } else { 4498 tn_type = tinst; 4499 } 4500 igvn->hash_delete(tn); 4501 igvn->set_type(tn, tn_type); 4502 tn->set_type(tn_type); 4503 igvn->hash_insert(tn); 4504 record_for_optimizer(n); 4505 } else { 4506 assert(tn_type == TypePtr::NULL_PTR || 4507 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)), 4508 "unexpected type"); 4509 continue; // Skip dead path with different type 4510 } 4511 } 4512 } else { 4513 debug_only(n->dump();) 4514 assert(false, "EA: unexpected node"); 4515 continue; 4516 } 4517 // push allocation's users on appropriate worklist 4518 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4519 Node *use = n->fast_out(i); 4520 if(use->is_Mem() && use->in(MemNode::Address) == n) { 4521 // Load/store to instance's field 4522 memnode_worklist.append_if_missing(use); 4523 } else if (use->is_MemBar()) { 4524 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 4525 memnode_worklist.append_if_missing(use); 4526 } 4527 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 4528 Node* addp2 = find_second_addp(use, n); 4529 if (addp2 != nullptr) { 4530 alloc_worklist.append_if_missing(addp2); 4531 } 4532 alloc_worklist.append_if_missing(use); 4533 } else if (use->is_Phi() || 4534 use->is_CheckCastPP() || 4535 use->is_EncodeNarrowPtr() || 4536 use->is_DecodeNarrowPtr() || 4537 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 4538 alloc_worklist.append_if_missing(use); 4539 #ifdef ASSERT 4540 } else if (use->is_Mem()) { 4541 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 4542 } else if (use->is_MergeMem()) { 4543 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 4544 } else if (use->is_SafePoint()) { 4545 // Look for MergeMem nodes for calls which reference unique allocation 4546 // (through CheckCastPP nodes) even for debug info. 
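// (Even when the unique allocation is only referenced by a SafePoint's
//  debug info, the call's MergeMem must be processed: the instance
//  memory slice has to be correct so the object can be rematerialized
//  during deoptimization.)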
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != nullptr) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X ||
                     op == Op_FastLock || op == Op_AryEq ||
                     op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_VectorizedHashCode ||
                     op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_SubTypeCheck ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

#ifdef ASSERT
  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* phi = reducible_merges.at(i);

      if (!reduced_merges.member(phi)) {
        phi->dump(2);
        phi->dump(-2);
        assert(false, "This reducible merge wasn't reduced.");
      }

      // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
      for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
        Node* use = phi->fast_out(j);
        if (!use->is_SafePoint() && !use->is_CastPP()) {
          phi->dump(2);
          phi->dump(-2);
          assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
        }
      }
    }
  }
#endif

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != nullptr) {
      Node *base = get_map(jobj->idx());
      if (base != nullptr) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != nullptr) {
      Node* base = get_map(jobj->idx());
      if (base != nullptr) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

  // Phase 2:  Process MemNode's from memnode_worklist. Compute a new address
  //           type and new values for the Memory inputs (the Memory inputs
  //           are not actually updated until Phase 4).
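  // A sketch of what find_inst_mem() does for a memory op processed below
  // (alias indices illustrative): given a node with instance alias index 'ai'
  // whose memory input is a chain that interleaves unrelated slices,
  //
  //   Load#ai <- StoreB#other <- StoreI#ai <- ...
  //
  // it walks up the chain and returns the first memory state relevant to
  // 'ai' (StoreI#ai here), splitting memory Phis per alias index when
  // needed; the original Phis are remembered in orig_phis and get their
  // inputs fixed up in Phase 4.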
  if (memnode_worklist.length() == 0)
    return; // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->is_CallLeaf()) {
      // Runtime calls with narrow memory input (no MergeMem node)
      // get the memory projection
      n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP) {
        continue;
      }
      assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below, when instance memory slices are separated.
        set_map(n, mem);
      }
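      // Loads produce no memory output, so there is nothing below them to
      // re-wire. LoadStore nodes (the CompareAndSwap/GetAndSet family)
      // publish their memory effect through an SCMemProj projection, which
      // is the node downstream memory users actually consume; hence the
      // projection is what gets pushed below.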
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar() || use->is_CallLeaf()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  // Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //           Walk each memory slice moving the first node encountered of each
  //           instance type to the input corresponding to its alias index.
  uint length = mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = nullptr;
      if (mem == nullptr || mem->is_top()) {
        continue;
      }
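      // Sketch of the redistribution performed by the walk below (slice and
      // alias indices are illustrative): with i == 5 and the input chain
      //
      //   nmm->in(5): StoreI#7 -> StoreI#5 -> StoreB#7 -> <chain end>
      //
      // StoreI#7 is parked at slice 7 (if that slice is still empty),
      // StoreI#5 becomes the first match kept for slice 5, and StoreB#7 is
      // skipped because slice 7 is already occupied.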
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != nullptr, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == nullptr) {
              cur = mem;
            }
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }

    // If we have crossed the 3/4 point of the max node limit, it's too risky
    // to continue with EA/SR because we might hit the max node limit.
    if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
      if (_compile->do_reduce_allocation_merges()) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
      } else if (_invocation > 0) {
        _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
      } else {
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
      }
      return;
    }

    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4:  Update the inputs of non-instance memory Phis and
  //           the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi. Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }
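
  // Example of the final rewiring (illustrative): a LoadI whose Phase 2
  // mapping points at an instance-slice store
  //
  //   LoadI  mem: MergeMem(...)   ==>   LoadI  mem: StoreI#ai
  //
  // Only MemNodes are touched here; the other mapped node kinds (Allocate,
  // CheckCastPP, AddP, Phi) were already handled when they were split.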
  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != nullptr, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
int ConnectionGraph::_no_escape_counter = 0;
int ConnectionGraph::_arg_escape_counter = 0;
int ConnectionGraph::_global_escape_counter = 0;

static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump_header(bool print_state, outputStream* out) const {
  NodeType nt = node_type();
  out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
      out->print("NSR ");
    }
  }
}
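
// Example of the header printed by dump_header() above (illustrative):
//
//   JavaObject(5) NoEscape(NoEscape) NSR
//
// i.e. the node type with its pidx, the escape state with the fields'
// escape state in parentheses, and "NSR" when a JavaObject is not scalar
// replaceable.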
"\n" : ""); 4960 } else { 4961 _node->dump(newline ? "\n" : "", false, out); 4962 } 4963 } 4964 4965 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 4966 bool first = true; 4967 int ptnodes_length = ptnodes_worklist.length(); 4968 for (int i = 0; i < ptnodes_length; i++) { 4969 PointsToNode *ptn = ptnodes_worklist.at(i); 4970 if (ptn == nullptr || !ptn->is_JavaObject()) { 4971 continue; 4972 } 4973 PointsToNode::EscapeState es = ptn->escape_state(); 4974 if ((es != PointsToNode::NoEscape) && !Verbose) { 4975 continue; 4976 } 4977 Node* n = ptn->ideal_node(); 4978 if (n->is_Allocate() || (n->is_CallStaticJava() && 4979 n->as_CallStaticJava()->is_boxing_method())) { 4980 if (first) { 4981 tty->cr(); 4982 tty->print("======== Connection graph for "); 4983 _compile->method()->print_short_name(); 4984 tty->cr(); 4985 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 4986 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 4987 tty->cr(); 4988 first = false; 4989 } 4990 ptn->dump(); 4991 // Print all locals and fields which reference this allocation 4992 for (UseIterator j(ptn); j.has_next(); j.next()) { 4993 PointsToNode* use = j.get(); 4994 if (use->is_LocalVar()) { 4995 use->dump(Verbose); 4996 } else if (Verbose) { 4997 use->dump(); 4998 } 4999 } 5000 tty->cr(); 5001 } 5002 } 5003 } 5004 5005 void ConnectionGraph::print_statistics() { 5006 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 5007 } 5008 5009 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 5010 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 5011 return; 5012 } 5013 for (int next = 0; next < java_objects_worklist.length(); ++next) { 5014 JavaObjectNode* ptn = java_objects_worklist.at(next); 5015 if (ptn->ideal_node()->is_Allocate()) { 5016 if (ptn->escape_state() == PointsToNode::NoEscape) { 5017 Atomic::inc(&ConnectionGraph::_no_escape_counter); 5018 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 5019 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 5020 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 5021 Atomic::inc(&ConnectionGraph::_global_escape_counter); 5022 } else { 5023 assert(false, "Unexpected Escape State"); 5024 } 5025 } 5026 } 5027 } 5028 5029 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 5030 if (_compile->directive()->TraceEscapeAnalysisOption) { 5031 assert(ptn != nullptr, "should not be null"); 5032 assert(reason != nullptr, "should not be null"); 5033 ptn->dump_header(true); 5034 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 5035 PointsToNode::EscapeState new_fields_es = fields ? 

void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    assert(ptn != nullptr, "should not be null");
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}

const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif

void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}