/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp(Phase::_t_escapeAnalysis);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis.
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp(Phase::_t_connectionGraph);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that, depending on the
        // escape status of the associated Allocate node, some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes processing,
  // makes calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception, which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObject
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocates
  // that they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);
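
  // For example (illustrative, not from this file): a reducible merge
  // "Phi(CheckCastPP(Allocate), nsr_ptr)" whose remaining users are only
  // safepoints becomes "Phi(null, nsr_ptr)" above. With its last regular
  // reference gone, the Allocate can be scalar replaced during macro
  // expansion, and deoptimization rematerializes the object from the
  // SafePointScalarMerge debug info instead.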
  // Annotate at safepoints if they have <= ArgEscape objects in their scope
  // and at java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}
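
// For example (illustrative Java shape, not taken from this file):
//
//   Point p = cond ? new Point() : cachedPoint; // SR allocation merged with
//   return p.x;                                 // an escaped object
//
// compiles to a merge "Phi(CheckCastPP(Allocate), LoadP)" in the ideal graph.
// The check below decides whether such a merge can be untangled, i.e. whether
// every user of the Phi is one of the supported shapes listed next.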
// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueNotNull node between If and Bool nodes. But we could also have a subclass of IfNode,
          // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the null constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against null.
//
// Before:
//
//          C1     C2  ... Cn
//           \      |      /
//            \     |     /
//             \    |    /
//              \   |   /
//               \  |  /
//                \ | /
//                 \|/
//       Region     B1      B2  ... Bn
//         |          \      |      /
//         |           \     |     /
//         |            \    |    /
//         |             \   |   /
//         |              \  |  /
//         |               \ | /
//         ---------------> Phi
//                            |
//                   X        |
//                   |        |
//                   |        |
//                   ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                      C2
//                       |
//                      If
//                     /  \
//                    /    \
//                   T      F
//                  /\      /
//                 /  \    /
//                /    \  /
//          C1  CastPP  Reg   Cn
//           |     |     |     |
//           |     |     |     |
//           ----------- | -----
//                  |    |   |
//                  Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor = current_control->unique_ctrl_out();
  Node* cmp = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the null constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the null constant) then we
// don't need (shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region   Allocate   Null    Call
//            |          \       |      /
//            |           \      |     /
//            |            \     |    /
//            |             \    |   /
//            |              \   |  /
//            |               \  | /
//            -----------------> Phi       # Oop Phi
//                                |
//                                |
//                                |
//                                |
//            ----------------> CastPP
//                                |
//                              AddP
//                                |
//                              Load
//
// After (Very much simplified):
//
//                       Call   Null
//                          \   /
//                          CmpP
//                            |
//                         Bool#NE
//                            |
//                            If
//                           / \
//                          T   F
//                         / \ /
//                        /   R
//                    CastPP  |
//                       |    |
//                     AddP   |
//                       |    |
//                     Load   |
//                        \   |   0
//                 Allocate \ |  /
//                       \   \| /
//                      AddP  Phi
//                         \  /
//                         Load
//                           \  0  /
//                            \ | /
//                             \|/
//                             Phi         # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting
  // the CastPP->Loads through ophi. Three kinds of values may be stored in
  // this array, depending on the nullability status of the corresponding
  // input in ophi.
  //
  //  - nullptr:    Meaning that the base is actually the null constant and
  //                therefore we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi.
  //                I.e., we added an If-Then-Else-Region that will 'activate'
  //                the CastPP only when the input is not null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll
  //                try to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already null
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer into a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                   CmpP/N
//
// After:
//
//        in1  Other   in2 Other  ...  inN  Other
//         |    |       |   |           |    |
//         \    |       |   |           |    |
//          \  /        |   /           |   /
//         CmpP/N     CmpP/N          CmpP/N
//          Bool       Bool            Bool
//            \         |              /
//             \        |             /
//              \       |            /
//               \      |           /
//                \     |          /
//                 \    |         /
//                  \   |        /
//                   \  |       /
//                      Phi
//                       |   Zero
//                       |    /
//                       |   /
//                       |  /
//                      CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  Node* one = _igvn->intcon(1);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = PhiNode::make(ophi->in(0), zero, TypeInt::INT);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
          (mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
        res_phi_input = one;
      } else {
        res_phi_input = zero;
      }
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  // This CmpI checks whether the output of "res_phi" is TRUE with respect to "mask".
  Node* new_cmp = _igvn->transform(new CmpINode(_igvn->transform(res_phi), (mask == BoolTest::mask::eq) ? one : zero));
  _igvn->replace_node(cmp, new_cmp);
}
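
// E.g. (illustrative): for "CmpP(Phi(alloc1, alloc2), null)" under a Bool with
// mask "ne", optimize_ptr_compare() folds both cloned compares to "known not
// equal" (CC_GT), both inputs of res_phi become the constant one, and the
// final CmpI constant folds; no pointer comparison against the merge remains.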
// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);
      Node* base = get_addp_base(new_addp);

      // The base might not be something for which we can create a unique
      // type. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//  -> a '-1' constant, the i'th input of the original Phi is NSR.
//  -> a 'x' constant >= 0, the i'th input of the original Phi will be SR and
//     the info about the scalarized object will be at index x of
//     ObjectMergeValue::possible_objects
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}
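
// E.g. (illustrative): for a merge "Phi(_, SR1, nsr_obj, SR2)" the selector is
// "Phi(_, 0, -1, 1)": on paths 1 and 3 the deoptimizer rematerializes
// possible_objects[0] / possible_objects[1], while on path 2 it keeps using
// the NSR merge pointer.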
// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, I collect all safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is that, when reducing a CastPP, we add a reference
// (the NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we processed CastPP's safepoints before Phi's safepoints, the
// algorithm that processes Phi's safepoints would think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}
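
// Sketch (illustrative) of the debug info the helper below produces for one
// safepoint with a reducible merge of two SR objects:
//
//   sfpt debug info: ..., SafePointScalarMerge#m, ..., nsr_ptr, selector
//   m's inputs:      SafePointScalarObject#a, SafePointScalarObject#b
//
// At deoptimization a selector value of -1 means "use nsr_ptr as is", while
// 0 or 1 picks the scalarized object (a or b) to rematerialize.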
// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It will then iterate on the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check detailed description in SafePointScalarMergeNode class header.
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register
      // information about it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
      if (sobj == nullptr) {
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
    _igvn->_worklist.push(sfpt);
  }

  return true;
}

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  _igvn->hash_delete(ophi);

  // Copying all users first because some will be removed and others won't.
  // Ophi also may acquire some new users as part of Cast reduction.
  // CastPPs also need to be processed before CmpPs.
  Unique_Node_List castpps;
  Unique_Node_List others;
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_CastPP()) {
      castpps.push(use);
    } else if (use->is_AddP() || use->is_Cmp()) {
      others.push(use);
    } else if (use->is_SafePoint()) {
      // processed later
    } else {
      assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
    }
  }

  // CastPPs need to be processed before Cmps because during the process of
  // splitting CastPPs we make reference to the inputs of the Cmp that is used
  // by the If controlling the CastPP.
  for (uint i = 0; i < castpps.size(); i++) {
    reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
  }

  for (uint i = 0; i < others.size(); i++) {
    Node* use = others.at(i);

    if (use->is_AddP()) {
      reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
      reduce_phi_on_cmp(use);
    }
  }

  _igvn->set_delay_transform(delay);
}

void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  for (int i = ophi->outcnt()-1; i >= 0;) {
    Node* out = ophi->raw_out(i);

    if (out->is_ConstraintCast()) {
      const Type* out_t = _igvn->type(out)->make_ptr();
      const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
      bool change = out_new_t != out_t;

      for (int j = out->outcnt()-1; change && j >= 0; --j) {
        Node* out2 = out->raw_out(j);
        if (!out2->is_SafePoint()) {
          change = false;
          break;
        }
      }

      if (change) {
        Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
        _igvn->replace_node(out, new_cast);
        _igvn->register_new_node_with_optimizer(new_cast);
      }
    }

    --i;
    i = MIN2(i, (int)ophi->outcnt()-1);
  }

  _igvn->replace_node(ophi, new_phi);
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  if (!C->do_reduce_allocation_merges()) return;

  Unique_Node_List ideal_nodes;
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}

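
// E.g. (illustrative): for "LoadP(AddP(alloc, alloc, #12))" the routines below
// give the AddP a Field node (offset 12) whose base is alloc's JavaObject
// node, and the LoadP becomes a LocalVar with an edge to that Field.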

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == nullptr) {
        delayed_worklist->push(n); // Process it later.
1560     } else {
1561       n_ptn = ptnode_adr(n_idx);
1562       add_base(n_ptn->as_Field(), ptn_base);
1563     }
1564     break;
1565   }
1566   case Op_CastX2P: {
1567     map_ideal_node(n, phantom_obj);
1568     break;
1569   }
1570   case Op_CastPP:
1571   case Op_CheckCastPP:
1572   case Op_EncodeP:
1573   case Op_DecodeN:
1574   case Op_EncodePKlass:
1575   case Op_DecodeNKlass: {
1576     add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1577     break;
1578   }
1579   case Op_CMoveP: {
1580     add_local_var(n, PointsToNode::NoEscape);
1581     // Do not add edges during the first iteration because some could
1582     // not be defined yet.
1583     delayed_worklist->push(n);
1584     break;
1585   }
1586   case Op_ConP:
1587   case Op_ConN:
1588   case Op_ConNKlass: {
1589     // assume all oop constants globally escape except for null
1590     PointsToNode::EscapeState es;
1591     const Type* t = igvn->type(n);
1592     if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1593       es = PointsToNode::NoEscape;
1594     } else {
1595       es = PointsToNode::GlobalEscape;
1596     }
1597     PointsToNode* ptn_con = add_java_object(n, es);
1598     set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1599     break;
1600   }
1601   case Op_CreateEx: {
1602     // assume that all exception objects globally escape
1603     map_ideal_node(n, phantom_obj);
1604     break;
1605   }
1606   case Op_LoadKlass:
1607   case Op_LoadNKlass: {
1608     // Unknown class is loaded
1609     map_ideal_node(n, phantom_obj);
1610     break;
1611   }
1612   case Op_LoadP:
1613   case Op_LoadN: {
1614     add_objload_to_connection_graph(n, delayed_worklist);
1615     break;
1616   }
1617   case Op_Parm: {
1618     map_ideal_node(n, phantom_obj);
1619     break;
1620   }
1621   case Op_PartialSubtypeCheck: {
1622     // Produces Null or notNull and is used only in CmpP so
1623     // phantom_obj could be used.
1624     map_ideal_node(n, phantom_obj); // Result is unknown
1625     break;
1626   }
1627   case Op_Phi: {
1628     // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1629     // ThreadLocal has RawPtr type.
1630     const Type* t = n->as_Phi()->type();
1631     if (t->make_ptr() != nullptr) {
1632       add_local_var(n, PointsToNode::NoEscape);
1633       // Do not add edges during the first iteration because some could
1634       // not be defined yet.
1635       delayed_worklist->push(n);
1636     }
1637     break;
1638   }
1639   case Op_Proj: {
1640     // we are only interested in the oop result projection from a call
1641     if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1642         n->in(0)->as_Call()->returns_pointer()) {
1643       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1644     }
1645     break;
1646   }
1647   case Op_Rethrow: // Exception object escapes
1648   case Op_Return: {
1649     if (n->req() > TypeFunc::Parms &&
1650         igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1651       // Treat Return value as LocalVar with GlobalEscape escape state.
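      // For example (a hypothetical sketch):
      //   static Object id(Object o) { return o; }
      // 'o' is handed to an unknown caller through the Return, so whatever
      // the return value references must be treated as globally escaping.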
1652 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist); 1653 } 1654 break; 1655 } 1656 case Op_CompareAndExchangeP: 1657 case Op_CompareAndExchangeN: 1658 case Op_GetAndSetP: 1659 case Op_GetAndSetN: { 1660 add_objload_to_connection_graph(n, delayed_worklist); 1661 // fall-through 1662 } 1663 case Op_StoreP: 1664 case Op_StoreN: 1665 case Op_StoreNKlass: 1666 case Op_WeakCompareAndSwapP: 1667 case Op_WeakCompareAndSwapN: 1668 case Op_CompareAndSwapP: 1669 case Op_CompareAndSwapN: { 1670 add_to_congraph_unsafe_access(n, opcode, delayed_worklist); 1671 break; 1672 } 1673 case Op_AryEq: 1674 case Op_CountPositives: 1675 case Op_StrComp: 1676 case Op_StrEquals: 1677 case Op_StrIndexOf: 1678 case Op_StrIndexOfChar: 1679 case Op_StrInflatedCopy: 1680 case Op_StrCompressedCopy: 1681 case Op_VectorizedHashCode: 1682 case Op_EncodeISOArray: { 1683 add_local_var(n, PointsToNode::ArgEscape); 1684 delayed_worklist->push(n); // Process it later. 1685 break; 1686 } 1687 case Op_ThreadLocal: { 1688 PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape); 1689 set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer")); 1690 break; 1691 } 1692 case Op_Blackhole: { 1693 // All blackhole pointer arguments are globally escaping. 1694 // Only do this if there is at least one pointer argument. 1695 // Do not add edges during first iteration because some could be 1696 // not defined yet, defer to final step. 1697 for (uint i = 0; i < n->req(); i++) { 1698 Node* in = n->in(i); 1699 if (in != nullptr) { 1700 const Type* at = _igvn->type(in); 1701 if (!at->isa_ptr()) continue; 1702 1703 add_local_var(n, PointsToNode::GlobalEscape); 1704 delayed_worklist->push(n); 1705 break; 1706 } 1707 } 1708 break; 1709 } 1710 default: 1711 ; // Do nothing for nodes not related to EA. 1712 } 1713 return; 1714 } 1715 1716 // Add final simple edges to graph. 1717 void ConnectionGraph::add_final_edges(Node *n) { 1718 PointsToNode* n_ptn = ptnode_adr(n->_idx); 1719 #ifdef ASSERT 1720 if (_verify && n_ptn->is_JavaObject()) 1721 return; // This method does not change graph for JavaObject. 1722 #endif 1723 1724 if (n->is_Call()) { 1725 process_call_arguments(n->as_Call()); 1726 return; 1727 } 1728 assert(n->is_Store() || n->is_LoadStore() || 1729 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)), 1730 "node should be registered already"); 1731 int opcode = n->Opcode(); 1732 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode); 1733 if (gc_handled) { 1734 return; // Ignore node if already handled by GC. 
1735 } 1736 switch (opcode) { 1737 case Op_AddP: { 1738 Node* base = get_addp_base(n); 1739 PointsToNode* ptn_base = ptnode_adr(base->_idx); 1740 assert(ptn_base != nullptr, "field's base should be registered"); 1741 add_base(n_ptn->as_Field(), ptn_base); 1742 break; 1743 } 1744 case Op_CastPP: 1745 case Op_CheckCastPP: 1746 case Op_EncodeP: 1747 case Op_DecodeN: 1748 case Op_EncodePKlass: 1749 case Op_DecodeNKlass: { 1750 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr); 1751 break; 1752 } 1753 case Op_CMoveP: { 1754 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 1755 Node* in = n->in(i); 1756 if (in == nullptr) { 1757 continue; // ignore null 1758 } 1759 Node* uncast_in = in->uncast(); 1760 if (uncast_in->is_top() || uncast_in == n) { 1761 continue; // ignore top or inputs which go back this node 1762 } 1763 PointsToNode* ptn = ptnode_adr(in->_idx); 1764 assert(ptn != nullptr, "node should be registered"); 1765 add_edge(n_ptn, ptn); 1766 } 1767 break; 1768 } 1769 case Op_LoadP: 1770 case Op_LoadN: { 1771 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1772 // ThreadLocal has RawPtr type. 1773 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); 1774 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); 1775 break; 1776 } 1777 case Op_Phi: { 1778 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1779 // ThreadLocal has RawPtr type. 1780 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type"); 1781 for (uint i = 1; i < n->req(); i++) { 1782 Node* in = n->in(i); 1783 if (in == nullptr) { 1784 continue; // ignore null 1785 } 1786 Node* uncast_in = in->uncast(); 1787 if (uncast_in->is_top() || uncast_in == n) { 1788 continue; // ignore top or inputs which go back this node 1789 } 1790 PointsToNode* ptn = ptnode_adr(in->_idx); 1791 assert(ptn != nullptr, "node should be registered"); 1792 add_edge(n_ptn, ptn); 1793 } 1794 break; 1795 } 1796 case Op_Proj: { 1797 // we are only interested in the oop result projection from a call 1798 assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && 1799 n->in(0)->as_Call()->returns_pointer(), "Unexpected node type"); 1800 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr); 1801 break; 1802 } 1803 case Op_Rethrow: // Exception object escapes 1804 case Op_Return: { 1805 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(), 1806 "Unexpected node type"); 1807 // Treat Return value as LocalVar with GlobalEscape escape state. 
1808 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr); 1809 break; 1810 } 1811 case Op_CompareAndExchangeP: 1812 case Op_CompareAndExchangeN: 1813 case Op_GetAndSetP: 1814 case Op_GetAndSetN:{ 1815 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); 1816 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); 1817 // fall-through 1818 } 1819 case Op_CompareAndSwapP: 1820 case Op_CompareAndSwapN: 1821 case Op_WeakCompareAndSwapP: 1822 case Op_WeakCompareAndSwapN: 1823 case Op_StoreP: 1824 case Op_StoreN: 1825 case Op_StoreNKlass:{ 1826 add_final_edges_unsafe_access(n, opcode); 1827 break; 1828 } 1829 case Op_VectorizedHashCode: 1830 case Op_AryEq: 1831 case Op_CountPositives: 1832 case Op_StrComp: 1833 case Op_StrEquals: 1834 case Op_StrIndexOf: 1835 case Op_StrIndexOfChar: 1836 case Op_StrInflatedCopy: 1837 case Op_StrCompressedCopy: 1838 case Op_EncodeISOArray: { 1839 // char[]/byte[] arrays passed to string intrinsic do not escape but 1840 // they are not scalar replaceable. Adjust escape state for them. 1841 // Start from in(2) edge since in(1) is memory edge. 1842 for (uint i = 2; i < n->req(); i++) { 1843 Node* adr = n->in(i); 1844 const Type* at = _igvn->type(adr); 1845 if (!adr->is_top() && at->isa_ptr()) { 1846 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 1847 at->isa_ptr() != nullptr, "expecting a pointer"); 1848 if (adr->is_AddP()) { 1849 adr = get_addp_base(adr); 1850 } 1851 PointsToNode* ptn = ptnode_adr(adr->_idx); 1852 assert(ptn != nullptr, "node should be registered"); 1853 add_edge(n_ptn, ptn); 1854 } 1855 } 1856 break; 1857 } 1858 case Op_Blackhole: { 1859 // All blackhole pointer arguments are globally escaping. 1860 for (uint i = 0; i < n->req(); i++) { 1861 Node* in = n->in(i); 1862 if (in != nullptr) { 1863 const Type* at = _igvn->type(in); 1864 if (!at->isa_ptr()) continue; 1865 1866 if (in->is_AddP()) { 1867 in = get_addp_base(in); 1868 } 1869 1870 PointsToNode* ptn = ptnode_adr(in->_idx); 1871 assert(ptn != nullptr, "should be defined already"); 1872 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole")); 1873 add_edge(n_ptn, ptn); 1874 } 1875 } 1876 break; 1877 } 1878 default: { 1879 // This method should be called only for EA specific nodes which may 1880 // miss some edges when they were created. 1881 #ifdef ASSERT 1882 n->dump(1); 1883 #endif 1884 guarantee(false, "unknown node"); 1885 } 1886 } 1887 return; 1888 } 1889 1890 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) { 1891 Node* adr = n->in(MemNode::Address); 1892 const Type* adr_type = _igvn->type(adr); 1893 adr_type = adr_type->make_ptr(); 1894 if (adr_type == nullptr) { 1895 return; // skip dead nodes 1896 } 1897 if (adr_type->isa_oopptr() 1898 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 1899 && adr_type == TypeRawPtr::NOTNULL 1900 && is_captured_store_address(adr))) { 1901 delayed_worklist->push(n); // Process it later. 1902 #ifdef ASSERT 1903 assert (adr->is_AddP(), "expecting an AddP"); 1904 if (adr_type == TypeRawPtr::NOTNULL) { 1905 // Verify a raw address for a store captured by Initialize node. 1906 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 1907 assert(offs != Type::OffsetBot, "offset must be a constant"); 1908 } 1909 #endif 1910 } else { 1911 // Ignore copy the displaced header to the BoxNode (OSR compilation). 
1912 if (adr->is_BoxLock()) { 1913 return; 1914 } 1915 // Stored value escapes in unsafe access. 1916 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1917 delayed_worklist->push(n); // Process unsafe access later. 1918 return; 1919 } 1920 #ifdef ASSERT 1921 n->dump(1); 1922 assert(false, "not unsafe"); 1923 #endif 1924 } 1925 } 1926 1927 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { 1928 Node* adr = n->in(MemNode::Address); 1929 const Type *adr_type = _igvn->type(adr); 1930 adr_type = adr_type->make_ptr(); 1931 #ifdef ASSERT 1932 if (adr_type == nullptr) { 1933 n->dump(1); 1934 assert(adr_type != nullptr, "dead node should not be on list"); 1935 return true; 1936 } 1937 #endif 1938 1939 if (adr_type->isa_oopptr() 1940 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 1941 && adr_type == TypeRawPtr::NOTNULL 1942 && is_captured_store_address(adr))) { 1943 // Point Address to Value 1944 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 1945 assert(adr_ptn != nullptr && 1946 adr_ptn->as_Field()->is_oop(), "node should be registered"); 1947 Node* val = n->in(MemNode::ValueIn); 1948 PointsToNode* ptn = ptnode_adr(val->_idx); 1949 assert(ptn != nullptr, "node should be registered"); 1950 add_edge(adr_ptn, ptn); 1951 return true; 1952 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1953 // Stored value escapes in unsafe access. 1954 Node* val = n->in(MemNode::ValueIn); 1955 PointsToNode* ptn = ptnode_adr(val->_idx); 1956 assert(ptn != nullptr, "node should be registered"); 1957 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address")); 1958 // Add edge to object for unsafe access with offset. 1959 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 1960 assert(adr_ptn != nullptr, "node should be registered"); 1961 if (adr_ptn->is_Field()) { 1962 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 1963 add_edge(adr_ptn, ptn); 1964 } 1965 return true; 1966 } 1967 #ifdef ASSERT 1968 n->dump(1); 1969 assert(false, "not unsafe"); 1970 #endif 1971 return false; 1972 } 1973 1974 void ConnectionGraph::add_call_node(CallNode* call) { 1975 assert(call->returns_pointer(), "only for call which returns pointer"); 1976 uint call_idx = call->_idx; 1977 if (call->is_Allocate()) { 1978 Node* k = call->in(AllocateNode::KlassNode); 1979 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 1980 assert(kt != nullptr, "TypeKlassPtr required."); 1981 PointsToNode::EscapeState es = PointsToNode::NoEscape; 1982 bool scalar_replaceable = true; 1983 NOT_PRODUCT(const char* nsr_reason = ""); 1984 if (call->is_AllocateArray()) { 1985 if (!kt->isa_aryklassptr()) { // StressReflectiveCode 1986 es = PointsToNode::GlobalEscape; 1987 } else { 1988 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 1989 if (length < 0) { 1990 // Not scalar replaceable if the length is not constant. 1991 scalar_replaceable = false; 1992 NOT_PRODUCT(nsr_reason = "has a non-constant length"); 1993 } else if (length > EliminateAllocationArraySizeLimit) { 1994 // Not scalar replaceable if the length is too big. 1995 scalar_replaceable = false; 1996 NOT_PRODUCT(nsr_reason = "has a length that is too big"); 1997 } 1998 } 1999 } else { // Allocate instance 2000 if (!kt->isa_instklassptr()) { // StressReflectiveCode 2001 es = PointsToNode::GlobalEscape; 2002 } else { 2003 const TypeInstKlassPtr* ikt = kt->is_instklassptr(); 2004 ciInstanceKlass* ik = ikt->klass_is_exact() ? 
ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2005       if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2006           ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2007           !ik->can_be_instantiated() ||
2008           ik->has_finalizer()) {
2009         es = PointsToNode::GlobalEscape;
2010       } else {
2011         int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2012         if (nfields > EliminateAllocationFieldsLimit) {
2013           // Not scalar replaceable if there are too many fields.
2014           scalar_replaceable = false;
2015           NOT_PRODUCT(nsr_reason = "has too many fields");
2016         }
2017       }
2018     }
2019   }
2020   add_java_object(call, es);
2021   PointsToNode* ptn = ptnode_adr(call_idx);
2022   if (!scalar_replaceable && ptn->scalar_replaceable()) {
2023     set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2024   }
2025 } else if (call->is_CallStaticJava()) {
2026   // Call nodes could be different types:
2027   //
2028   // 1. CallDynamicJavaNode (what happened during call is unknown):
2029   //
2030   //    - mapped to GlobalEscape JavaObject node if oop is returned;
2031   //
2032   //    - all oop arguments are escaping globally;
2033   //
2034   // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2035   //
2036   //    - the same as CallDynamicJavaNode if we can't do bytecode analysis;
2037   //
2038   //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2039   //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2040   //      during call is returned;
2041   //    - mapped to ArgEscape LocalVar node pointing to object arguments
2042   //      which are returned and do not escape during the call;
2043   //
2044   //    - oop arguments escaping status is defined by bytecode analysis;
2045   //
2046   // For a static call, we know exactly what method is being called.
2047   // Use bytecode estimator to record whether the call's return value escapes.
2048   ciMethod* meth = call->as_CallJava()->method();
2049   if (meth == nullptr) {
2050     const char* name = call->as_CallStaticJava()->_name;
2051     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2052     // Returns a newly allocated non-escaped object.
2053     add_java_object(call, PointsToNode::NoEscape);
2054     set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2055   } else if (meth->is_boxing_method()) {
2056     // Returns boxing object
2057     PointsToNode::EscapeState es;
2058     vmIntrinsics::ID intr = meth->intrinsic_id();
2059     if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2060       // It does not escape if object is always allocated.
2061       es = PointsToNode::NoEscape;
2062     } else {
2063       // It escapes globally if object could be loaded from cache.
2064       es = PointsToNode::GlobalEscape;
2065     }
2066     add_java_object(call, es);
2067     if (es == PointsToNode::GlobalEscape) {
2068       set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2069     }
2070   } else {
2071     BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2072     call_analyzer->copy_dependencies(_compile->dependencies());
2073     if (call_analyzer->is_return_allocated()) {
2074       // Returns a newly allocated non-escaped object, simply
2075       // update dependency information.
2076       // Mark it as NoEscape so that objects referenced by
2077       // its fields will be marked as NoEscape at least.
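      // For example (a hypothetical sketch of a callee this covers):
      //   static Point make() { return new Point(); }
      // which always returns its own fresh allocation, so the result can
      // be treated as NoEscape in the caller.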
2078       add_java_object(call, PointsToNode::NoEscape);
2079       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2080     } else {
2081       // Determine whether any arguments are returned.
2082       const TypeTuple* d = call->tf()->domain();
2083       bool ret_arg = false;
2084       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2085         if (d->field_at(i)->isa_ptr() != nullptr &&
2086             call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2087           ret_arg = true;
2088           break;
2089         }
2090       }
2091       if (ret_arg) {
2092         add_local_var(call, PointsToNode::ArgEscape);
2093       } else {
2094         // Returns unknown object.
2095         map_ideal_node(call, phantom_obj);
2096       }
2097     }
2098   }
2099 } else {
2100   // Another type of call, assume the worst case:
2101   // returned value is unknown and globally escapes.
2102   assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2103   map_ideal_node(call, phantom_obj);
2104 }
2105 }
2106
2107 void ConnectionGraph::process_call_arguments(CallNode *call) {
2108     bool is_arraycopy = false;
2109     switch (call->Opcode()) {
2110 #ifdef ASSERT
2111     case Op_Allocate:
2112     case Op_AllocateArray:
2113     case Op_Lock:
2114     case Op_Unlock:
2115       assert(false, "should be done already");
2116       break;
2117 #endif
2118     case Op_ArrayCopy:
2119     case Op_CallLeafNoFP:
2120       // Most array copies are ArrayCopy nodes at this point but there
2121       // are still a few direct calls to the copy subroutines (See
2122       // PhaseStringOpts::copy_string())
2123       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2124         call->as_CallLeaf()->is_call_to_arraycopystub();
2125       // fall through
2126     case Op_CallLeafVector:
2127     case Op_CallLeaf: {
2128       // Stub calls: objects do not escape but they are not scalar replaceable.
2129       // Adjust escape state for outgoing arguments.
2130       const TypeTuple * d = call->tf()->domain();
2131       bool src_has_oops = false;
2132       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2133         const Type* at = d->field_at(i);
2134         Node *arg = call->in(i);
2135         if (arg == nullptr) {
2136           continue;
2137         }
2138         const Type *aat = _igvn->type(arg);
2139         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2140           continue;
2141         }
2142         if (arg->is_AddP()) {
2143           //
2144           // The inline_native_clone() case when the arraycopy stub is called
2145           // after the allocation before Initialize and CheckCastPP nodes.
2146           // Or normal arraycopy for object arrays case.
2147           //
2148           // Set AddP's base (Allocate) as not scalar replaceable since
2149           // pointer to the base (with offset) is passed as argument.
2150           //
2151           arg = get_addp_base(arg);
2152         }
2153         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2154         assert(arg_ptn != nullptr, "should be registered");
2155         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2156         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2157           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2158                  aat->isa_ptr() != nullptr, "expecting a Ptr");
2159           bool arg_has_oops = aat->isa_oopptr() &&
2160                               (aat->isa_instptr() ||
2161                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2162           if (i == TypeFunc::Parms) {
2163             src_has_oops = arg_has_oops;
2164           }
2165           //
2166           // src or dst could be j.l.Object when other is basic type array:
2167           //
2168           //   arraycopy(char[],0,Object*,0,size);
2169           //   arraycopy(Object*,0,char[],0,size);
2170           //
2171           // Don't add edges in such cases.
2172 // 2173 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 2174 arg_has_oops && (i > TypeFunc::Parms); 2175 #ifdef ASSERT 2176 if (!(is_arraycopy || 2177 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 2178 (call->as_CallLeaf()->_name != nullptr && 2179 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 2180 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 2181 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 2182 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 2183 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 2184 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 2185 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 2186 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 2187 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 2188 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 2189 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 || 2190 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 || 2191 strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 || 2192 strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 || 2193 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 2194 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 || 2195 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 2196 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 || 2197 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 || 2198 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 2199 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 2200 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 2201 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 2202 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 2203 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 2204 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 2205 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 || 2206 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 || 2207 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 2208 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 2209 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 2210 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 2211 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 2212 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 2213 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 2214 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2215 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 || 2216 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || 2217 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || 2218 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 || 2219 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0) 2220 ))) { 2221 call->dump(); 2222 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 2223 } 2224 #endif 2225 // Always process arraycopy's destination object since 2226 // we need to add all possible edges to references in 2227 // source object. 
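          // For example (a hypothetical sketch):
          //   System.arraycopy(src, 0, dst, 0, n);   // Object[] src and dst
          // after the copy, loads from dst's elements may observe anything
          // stored in src's elements, so dst still needs edges even if it
          // is already ArgEscape.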
2228           if (arg_esc >= PointsToNode::ArgEscape &&
2229               !arg_is_arraycopy_dest) {
2230             continue;
2231           }
2232           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2233           if (call->is_ArrayCopy()) {
2234             ArrayCopyNode* ac = call->as_ArrayCopy();
2235             if (ac->is_clonebasic() ||
2236                 ac->is_arraycopy_validated() ||
2237                 ac->is_copyof_validated() ||
2238                 ac->is_copyofrange_validated()) {
2239               es = PointsToNode::NoEscape;
2240             }
2241           }
2242           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2243           if (arg_is_arraycopy_dest) {
2244             Node* src = call->in(TypeFunc::Parms);
2245             if (src->is_AddP()) {
2246               src = get_addp_base(src);
2247             }
2248             PointsToNode* src_ptn = ptnode_adr(src->_idx);
2249             assert(src_ptn != nullptr, "should be registered");
2250             if (arg_ptn != src_ptn) {
2251               // Special arraycopy edge:
2252               // A destination object's field can't have the source object
2253               // as base since the objects' escape states are not related.
2254               // Only escape state of destination object's fields affects
2255               // escape state of fields in source object.
2256               add_arraycopy(call, es, src_ptn, arg_ptn);
2257             }
2258           }
2259         }
2260       }
2261       break;
2262     }
2263     case Op_CallStaticJava: {
2264       // For a static call, we know exactly what method is being called.
2265       // Use bytecode estimator to record the call's escape effects
2266 #ifdef ASSERT
2267       const char* name = call->as_CallStaticJava()->_name;
2268       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2269 #endif
2270       ciMethod* meth = call->as_CallJava()->method();
2271       if ((meth != nullptr) && meth->is_boxing_method()) {
2272         break; // Boxing methods do not modify any oops.
2273       }
2274       BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2275       // fall-through if not a Java method or no analyzer information
2276       if (call_analyzer != nullptr) {
2277         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2278         const TypeTuple* d = call->tf()->domain();
2279         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2280           const Type* at = d->field_at(i);
2281           int k = i - TypeFunc::Parms;
2282           Node* arg = call->in(i);
2283           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2284           if (at->isa_ptr() != nullptr &&
2285               call_analyzer->is_arg_returned(k)) {
2286             // The call returns arguments.
2287             if (call_ptn != nullptr) { // Is call's result used?
2288               assert(call_ptn->is_LocalVar(), "node should be registered");
2289               assert(arg_ptn != nullptr, "node should be registered");
2290               add_edge(call_ptn, arg_ptn);
2291             }
2292           }
2293           if (at->isa_oopptr() != nullptr &&
2294               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2295             if (!call_analyzer->is_arg_stack(k)) {
2296               // The argument escapes globally
2297               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2298             } else {
2299               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2300               if (!call_analyzer->is_arg_local(k)) {
2301                 // The argument itself doesn't escape, but any fields might
2302                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2303               }
2304             }
2305           }
2306         }
2307         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2308           // The call returns arguments.
2309           assert(call_ptn->edge_count() > 0, "sanity");
2310           if (!call_analyzer->is_return_local()) {
2311             // The call may also return an unknown object.
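            // For example (a hypothetical sketch):
            //   static Object pick(Object a) { return flag ? a : CACHED; }
            // may return either its argument or an object unknown to this
            // analysis, so the result points to phantom_obj in addition to
            // the argument edges.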
2312             add_edge(call_ptn, phantom_obj);
2313           }
2314         }
2315       break;
2316     }
2317     }
2318     default: {
2319       // Fall-through here if not a Java method or no analyzer information
2320       // or some other type of call, assume the worst case: all arguments
2321       // globally escape.
2322       const TypeTuple* d = call->tf()->domain();
2323       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2324         const Type* at = d->field_at(i);
2325         if (at->isa_oopptr() != nullptr) {
2326           Node* arg = call->in(i);
2327           if (arg->is_AddP()) {
2328             arg = get_addp_base(arg);
2329           }
2330           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2331           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2332         }
2333       }
2334     }
2335   }
2336 }
2337
2338
2339 // Finish Graph construction.
2340 bool ConnectionGraph::complete_connection_graph(
2341                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2342                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2343                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
2344                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
2345   // Normally only 1-3 passes are needed to build the Connection Graph, depending
2346   // on graph complexity. 8 passes were observed in jvm2008 compiler.compiler.
2347   // Set the limit to 20 to catch the situation when something went wrong and
2348   // bail out of Escape Analysis.
2349   // Also limit the build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
2350 #define GRAPH_BUILD_ITER_LIMIT 20
2351
2352   // Propagate GlobalEscape and ArgEscape escape states and check that
2353   // we still have non-escaping objects. The method pushes onto _worklist
2354   // Field nodes which reference phantom_object.
2355   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2356     return false; // Nothing to do.
2357   }
2358   // Now propagate references to all JavaObject nodes.
2359   int java_objects_length = java_objects_worklist.length();
2360   elapsedTimer build_time;
2361   build_time.start();
2362   elapsedTimer time;
2363   bool timeout = false;
2364   int new_edges = 1;
2365   int iterations = 0;
2366   do {
2367     while ((new_edges > 0) &&
2368            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2369       double start_time = time.seconds();
2370       time.start();
2371       new_edges = 0;
2372       // Propagate references to phantom_object for nodes pushed on _worklist
2373       // by find_non_escaped_objects() and find_field_value().
2374       new_edges += add_java_object_edges(phantom_obj, false);
2375       for (int next = 0; next < java_objects_length; ++next) {
2376         JavaObjectNode* ptn = java_objects_worklist.at(next);
2377         new_edges += add_java_object_edges(ptn, true);
2378
2379 #define SAMPLE_SIZE 4
2380         if ((next % SAMPLE_SIZE) == 0) {
2381           // Every 4 iterations estimate how much time it will take
2382           // to complete graph construction.
2383           time.stop();
2384           // Poll for requests from shutdown mechanism to quiesce compiler
2385           // because Connection graph construction may take a long time.
2386           CompileBroker::maybe_block();
2387           double stop_time = time.seconds();
2388           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2389           double time_until_end = time_per_iter * (double)(java_objects_length - next);
2390           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2391             timeout = true;
2392             break; // Timeout
2393           }
2394           start_time = stop_time;
2395           time.start();
2396         }
2397 #undef SAMPLE_SIZE
2398
2399       }
2400       if (timeout) break;
2401       if (new_edges > 0) {
2402         // Update escape states on each iteration if graph was updated.
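        // (Propagation only raises escape states, so if no non-escaping
        // objects remain after the update the whole analysis can stop
        // early.)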
2403         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2404           return false; // Nothing to do.
2405         }
2406       }
2407       time.stop();
2408       if (time.seconds() >= EscapeAnalysisTimeout) {
2409         timeout = true;
2410         break;
2411       }
2412     }
2413     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2414       time.start();
2415       // Find fields which have unknown value.
2416       int fields_length = oop_fields_worklist.length();
2417       for (int next = 0; next < fields_length; next++) {
2418         FieldNode* field = oop_fields_worklist.at(next);
2419         if (field->edge_count() == 0) {
2420           new_edges += find_field_value(field);
2421           // This code may have added new edges to phantom_object.
2422           // Another cycle is needed to propagate references to phantom_object.
2423         }
2424       }
2425       time.stop();
2426       if (time.seconds() >= EscapeAnalysisTimeout) {
2427         timeout = true;
2428         break;
2429       }
2430     } else {
2431       new_edges = 0; // Bailout
2432     }
2433   } while (new_edges > 0);
2434
2435   build_time.stop();
2436   _build_time = build_time.seconds();
2437   _build_iterations = iterations;
2438
2439   // Bailout if passed limits.
2440   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2441     Compile* C = _compile;
2442     if (C->log() != nullptr) {
2443       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2444       C->log()->text("%s", timeout ? "time" : "iterations");
2445       C->log()->end_elem(" limit'");
2446     }
2447     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2448            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2449     // Possible infinite build_connection_graph loop,
2450     // bailout (no changes to ideal graph were made).
2451     return false;
2452   }
2453
2454 #undef GRAPH_BUILD_ITER_LIMIT
2455
2456   // Find fields initialized by null for non-escaping Allocations.
2457   int non_escaped_length = non_escaped_allocs_worklist.length();
2458   for (int next = 0; next < non_escaped_length; next++) {
2459     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2460     PointsToNode::EscapeState es = ptn->escape_state();
2461     assert(es <= PointsToNode::ArgEscape, "sanity");
2462     if (es == PointsToNode::NoEscape) {
2463       if (find_init_values_null(ptn, _igvn) > 0) {
2464         // Adding references to null object does not change escape states
2465         // since it does not escape. Also no fields are added to null object.
2466         add_java_object_edges(null_obj, false);
2467       }
2468     }
2469     Node* n = ptn->ideal_node();
2470     if (n->is_Allocate()) {
2471       // The object allocated by this Allocate node will never be
2472       // seen by another thread. Mark it so that when it is
2473       // expanded no MemBarStoreStore is added.
2474       InitializeNode* ini = n->as_Allocate()->initialization();
2475       if (ini != nullptr)
2476         ini->set_does_not_escape();
2477     }
2478   }
2479   return true; // Finished graph construction.
2480 }
2481
2482 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2483 // and check that we still have non-escaping java objects.
2484 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2485                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2486   GrowableArray<PointsToNode*> escape_worklist;
2487   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
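  // The states form a lattice, NoEscape < ArgEscape < GlobalEscape, and the
  // loop below only ever raises a node's state, which guarantees it
  // terminates. For example, a JavaObject reachable from a GlobalEscape
  // LocalVar is raised to GlobalEscape as well.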
2488 int ptnodes_length = ptnodes_worklist.length(); 2489 for (int next = 0; next < ptnodes_length; ++next) { 2490 PointsToNode* ptn = ptnodes_worklist.at(next); 2491 if (ptn->escape_state() >= PointsToNode::ArgEscape || 2492 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 2493 escape_worklist.push(ptn); 2494 } 2495 } 2496 // Set escape states to referenced nodes (edges list). 2497 while (escape_worklist.length() > 0) { 2498 PointsToNode* ptn = escape_worklist.pop(); 2499 PointsToNode::EscapeState es = ptn->escape_state(); 2500 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 2501 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 2502 es >= PointsToNode::ArgEscape) { 2503 // GlobalEscape or ArgEscape state of field means it has unknown value. 2504 if (add_edge(ptn, phantom_obj)) { 2505 // New edge was added 2506 add_field_uses_to_worklist(ptn->as_Field()); 2507 } 2508 } 2509 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2510 PointsToNode* e = i.get(); 2511 if (e->is_Arraycopy()) { 2512 assert(ptn->arraycopy_dst(), "sanity"); 2513 // Propagate only fields escape state through arraycopy edge. 2514 if (e->fields_escape_state() < field_es) { 2515 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2516 escape_worklist.push(e); 2517 } 2518 } else if (es >= field_es) { 2519 // fields_escape_state is also set to 'es' if it is less than 'es'. 2520 if (e->escape_state() < es) { 2521 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2522 escape_worklist.push(e); 2523 } 2524 } else { 2525 // Propagate field escape state. 2526 bool es_changed = false; 2527 if (e->fields_escape_state() < field_es) { 2528 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2529 es_changed = true; 2530 } 2531 if ((e->escape_state() < field_es) && 2532 e->is_Field() && ptn->is_JavaObject() && 2533 e->as_Field()->is_oop()) { 2534 // Change escape state of referenced fields. 2535 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2536 es_changed = true; 2537 } else if (e->escape_state() < es) { 2538 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2539 es_changed = true; 2540 } 2541 if (es_changed) { 2542 escape_worklist.push(e); 2543 } 2544 } 2545 } 2546 } 2547 // Remove escaped objects from non_escaped list. 2548 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 2549 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 2550 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 2551 non_escaped_allocs_worklist.delete_at(next); 2552 } 2553 if (ptn->escape_state() == PointsToNode::NoEscape) { 2554 // Find fields in non-escaped allocations which have unknown value. 2555 find_init_values_phantom(ptn); 2556 } 2557 } 2558 return (non_escaped_allocs_worklist.length() > 0); 2559 } 2560 2561 // Add all references to JavaObject node by walking over all uses. 2562 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 2563 int new_edges = 0; 2564 if (populate_worklist) { 2565 // Populate _worklist by uses of jobj's uses. 2566 for (UseIterator i(jobj); i.has_next(); i.next()) { 2567 PointsToNode* use = i.get(); 2568 if (use->is_Arraycopy()) { 2569 continue; 2570 } 2571 add_uses_to_worklist(use); 2572 if (use->is_Field() && use->as_Field()->is_oop()) { 2573 // Put on worklist all field's uses (loads) and 2574 // related field nodes (same base and offset). 
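        // (Distinct AddP nodes with the same base and offset denote the
        // same logical field, so a value stored through one must become
        // visible to loads performed through the others.)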
2575         add_field_uses_to_worklist(use->as_Field());
2576       }
2577     }
2578   }
2579   for (int l = 0; l < _worklist.length(); l++) {
2580     PointsToNode* use = _worklist.at(l);
2581     if (PointsToNode::is_base_use(use)) {
2582       // Add reference from jobj to field and from field to jobj (field's base).
2583       use = PointsToNode::get_use_node(use)->as_Field();
2584       if (add_base(use->as_Field(), jobj)) {
2585         new_edges++;
2586       }
2587       continue;
2588     }
2589     assert(!use->is_JavaObject(), "sanity");
2590     if (use->is_Arraycopy()) {
2591       if (jobj == null_obj) { // null object does not have field edges
2592         continue;
2593       }
2594       // Add edge from Arraycopy node to arraycopy's source java object
2595       if (add_edge(use, jobj)) {
2596         jobj->set_arraycopy_src();
2597         new_edges++;
2598       }
2599       // and stop here.
2600       continue;
2601     }
2602     if (!add_edge(use, jobj)) {
2603       continue; // No new edge added, there was such an edge already.
2604     }
2605     new_edges++;
2606     if (use->is_LocalVar()) {
2607       add_uses_to_worklist(use);
2608       if (use->arraycopy_dst()) {
2609         for (EdgeIterator i(use); i.has_next(); i.next()) {
2610           PointsToNode* e = i.get();
2611           if (e->is_Arraycopy()) {
2612             if (jobj == null_obj) { // null object does not have field edges
2613               continue;
2614             }
2615             // Add edge from arraycopy's destination java object to Arraycopy node.
2616             if (add_edge(jobj, e)) {
2617               new_edges++;
2618               jobj->set_arraycopy_dst();
2619             }
2620           }
2621         }
2622       }
2623     } else {
2624       // Added a new edge to values stored in a field.
2625       // Put on worklist all field's uses (loads) and
2626       // related field nodes (same base and offset).
2627       add_field_uses_to_worklist(use->as_Field());
2628     }
2629   }
2630   _worklist.clear();
2631   _in_worklist.reset();
2632   return new_edges;
2633 }
2634
2635 // Put on worklist all related field nodes.
2636 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2637   assert(field->is_oop(), "sanity");
2638   int offset = field->offset();
2639   add_uses_to_worklist(field);
2640   // Loop over all bases of this field and push on worklist Field nodes
2641   // with the same offset and base (since they may reference the same field).
2642   for (BaseIterator i(field); i.has_next(); i.next()) {
2643     PointsToNode* base = i.get();
2644     add_fields_to_worklist(field, base);
2645     // Check if the base was source object of arraycopy and go over arraycopy's
2646     // destination objects since values stored to a field of source object are
2647     // accessible by uses (loads) of fields of destination objects.
2648     if (base->arraycopy_src()) {
2649       for (UseIterator j(base); j.has_next(); j.next()) {
2650         PointsToNode* arycp = j.get();
2651         if (arycp->is_Arraycopy()) {
2652           for (UseIterator k(arycp); k.has_next(); k.next()) {
2653             PointsToNode* abase = k.get();
2654             if (abase->arraycopy_dst() && abase != base) {
2655               // Look for the same arraycopy reference.
2656               add_fields_to_worklist(field, abase);
2657             }
2658           }
2659         }
2660       }
2661     }
2662   }
2663 }
2664
2665 // Put on worklist all related field nodes.
2666 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2667   int offset = field->offset();
2668   if (base->is_LocalVar()) {
2669     for (UseIterator j(base); j.has_next(); j.next()) {
2670       PointsToNode* f = j.get();
2671       if (PointsToNode::is_base_use(f)) { // Field
2672         f = PointsToNode::get_use_node(f);
2673         if (f == field || !f->as_Field()->is_oop()) {
2674           continue;
2675         }
2676         int offs = f->as_Field()->offset();
2677         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2678           add_to_worklist(f);
2679         }
2680       }
2681     }
2682   } else {
2683     assert(base->is_JavaObject(), "sanity");
2684     if (// Skip phantom_object since it is only used to indicate that
2685         // this field's content globally escapes.
2686         (base != phantom_obj) &&
2687         // null object node does not have fields.
2688         (base != null_obj)) {
2689       for (EdgeIterator i(base); i.has_next(); i.next()) {
2690         PointsToNode* f = i.get();
2691         // Skip arraycopy edge since store to destination object field
2692         // does not update value in source object field.
2693         if (f->is_Arraycopy()) {
2694           assert(base->arraycopy_dst(), "sanity");
2695           continue;
2696         }
2697         if (f == field || !f->as_Field()->is_oop()) {
2698           continue;
2699         }
2700         int offs = f->as_Field()->offset();
2701         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2702           add_to_worklist(f);
2703         }
2704       }
2705     }
2706   }
2707 }
2708
2709 // Find fields which have unknown value.
2710 int ConnectionGraph::find_field_value(FieldNode* field) {
2711   // Escaped fields should have init value already.
2712   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2713   int new_edges = 0;
2714   for (BaseIterator i(field); i.has_next(); i.next()) {
2715     PointsToNode* base = i.get();
2716     if (base->is_JavaObject()) {
2717       // Skip Allocate's fields which will be processed later.
2718       if (base->ideal_node()->is_Allocate()) {
2719         return 0;
2720       }
2721       assert(base == null_obj, "only null ptr base expected here");
2722     }
2723   }
2724   if (add_edge(field, phantom_obj)) {
2725     // New edge was added
2726     new_edges++;
2727     add_field_uses_to_worklist(field);
2728   }
2729   return new_edges;
2730 }
2731
2732 // Find fields initializing values for allocations.
2733 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2734   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2735   Node* alloc = pta->ideal_node();
2736
2737   // Do nothing for Allocate nodes since their field values are
2738   // "known" unless they are initialized by arraycopy/clone.
2739   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2740     return 0;
2741   }
2742   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2743 #ifdef ASSERT
2744   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2745     const char* name = alloc->as_CallStaticJava()->_name;
2746     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2747   }
2748 #endif
2749   // Non-escaped allocations returned from Java or runtime calls have unknown values in fields.
2750   int new_edges = 0;
2751   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2752     PointsToNode* field = i.get();
2753     if (field->is_Field() && field->as_Field()->is_oop()) {
2754       if (add_edge(field, phantom_obj)) {
2755         // New edge was added
2756         new_edges++;
2757         add_field_uses_to_worklist(field->as_Field());
2758       }
2759     }
2760   }
2761   return new_edges;
2762 }
2763
2764 // Find fields initializing values for allocations.
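// For the null variant below, consider a hypothetical sketch:
//   Point p = new Point();     // p.next is never written
//   if (p.next == q) { ... }   // must still see null as p.next's value
// An edge to null_obj records the default initialization so that the
// pointer compare optimization stays correct.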
2765 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2766   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2767   Node* alloc = pta->ideal_node();
2768   // Do nothing for Call nodes since their field values are unknown.
2769   if (!alloc->is_Allocate()) {
2770     return 0;
2771   }
2772   InitializeNode* ini = alloc->as_Allocate()->initialization();
2773   bool visited_bottom_offset = false;
2774   GrowableArray<int> offsets_worklist;
2775   int new_edges = 0;
2776
2777   // Check if an oop field's initializing value is recorded and add
2778   // a corresponding null as the field's value if it is not recorded.
2779   // Connection Graph does not record a default initialization by null
2780   // captured by Initialize node.
2781   //
2782   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2783     PointsToNode* field = i.get(); // Field (AddP)
2784     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2785       continue; // Not oop field
2786     }
2787     int offset = field->as_Field()->offset();
2788     if (offset == Type::OffsetBot) {
2789       if (!visited_bottom_offset) {
2790         // OffsetBot is used to reference an array's element;
2791         // always add a reference to null to all Field nodes since we don't
2792         // know which element is referenced.
2793         if (add_edge(field, null_obj)) {
2794           // New edge was added
2795           new_edges++;
2796           add_field_uses_to_worklist(field->as_Field());
2797           visited_bottom_offset = true;
2798         }
2799       }
2800     } else {
2801       // Check only oop fields.
2802       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2803       if (adr_type->isa_rawptr()) {
2804 #ifdef ASSERT
2805         // Raw pointers are used for initializing stores, so skip this field
2806         // since it should be recorded already.
2807         Node* base = get_addp_base(field->ideal_node());
2808         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2809 #endif
2810         continue;
2811       }
2812       if (!offsets_worklist.contains(offset)) {
2813         offsets_worklist.append(offset);
2814         Node* value = nullptr;
2815         if (ini != nullptr) {
2816           // StoreP::memory_type() == T_ADDRESS
2817           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2818           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2819           // Make sure the initializing store has the same type as this AddP.
2820           // This AddP may reference a non-existing field because it is on a
2821           // dead branch of a bimorphic call which is not eliminated yet.
2822           if (store != nullptr && store->is_Store() &&
2823               store->as_Store()->memory_type() == ft) {
2824             value = store->in(MemNode::ValueIn);
2825 #ifdef ASSERT
2826             if (VerifyConnectionGraph) {
2827               // Verify that AddP already points to all objects the value points to.
2828               PointsToNode* val = ptnode_adr(value->_idx);
2829               assert((val != nullptr), "should be processed already");
2830               PointsToNode* missed_obj = nullptr;
2831               if (val->is_JavaObject()) {
2832                 if (!field->points_to(val->as_JavaObject())) {
2833                   missed_obj = val;
2834                 }
2835               } else {
2836                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2837                   tty->print_cr("----------init store has invalid value -----");
2838                   store->dump();
2839                   val->dump();
2840                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2841                 }
2842                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2843                   PointsToNode* obj = j.get();
2844                   if (obj->is_JavaObject()) {
2845                     if (!field->points_to(obj->as_JavaObject())) {
2846                       missed_obj = obj;
2847                       break;
2848                     }
2849                   }
2850                 }
2851               }
2852               if (missed_obj != nullptr) {
2853                 tty->print_cr("----------field---------------------------------");
2854                 field->dump();
2855                 tty->print_cr("----------missed reference to object-----------");
2856                 missed_obj->dump();
2857                 tty->print_cr("----------object referenced by init store -----");
2858                 store->dump();
2859                 val->dump();
2860                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2861               }
2862             }
2863 #endif
2864           } else {
2865             // There could be initializing stores which follow allocation.
2866             // For example, a volatile field store is not collected
2867             // by Initialize node.
2868             //
2869             // Need to check for dependent loads to separate such stores from
2870             // stores which follow loads. For now, add initial value null so
2871             // that the pointer compare optimization works correctly.
2872           }
2873         }
2874         if (value == nullptr) {
2875           // A field's initializing value was not recorded. Add null.
2876           if (add_edge(field, null_obj)) {
2877             // New edge was added
2878             new_edges++;
2879             add_field_uses_to_worklist(field->as_Field());
2880           }
2881         }
2882       }
2883     }
2884   }
2885   return new_edges;
2886 }
2887
2888 // Adjust scalar_replaceable state after Connection Graph is built.
2889 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2890   // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2891   // returns true. If one of the constraints in this method sets 'jobj' to NSR
2892   // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2893   // input, 'adjust_scalar_replaceable_state' will eventually be called with
2894   // that other object and the Phi will become a reducible Phi.
2895   // There could be multiple merges involving the same jobj.
2896   Unique_Node_List candidates;
2897
2898   // Search for non-escaping objects which are not scalar replaceable
2899   // and mark them to propagate the state to referenced objects.
2900
2901   for (UseIterator i(jobj); i.has_next(); i.next()) {
2902     PointsToNode* use = i.get();
2903     if (use->is_Arraycopy()) {
2904       continue;
2905     }
2906     if (use->is_Field()) {
2907       FieldNode* field = use->as_Field();
2908       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2909       // 1. An object is not scalar replaceable if the field into which it is
2910       // stored has unknown offset (stored into unknown element of an array).
2911       if (field->offset() == Type::OffsetBot) {
2912         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2913         return;
2914       }
2915       for (BaseIterator i(field); i.has_next(); i.next()) {
2916         PointsToNode* base = i.get();
2917         // 2.
An object is not scalar replaceable if the field into which it is
2918         // stored has multiple bases one of which is null.
2919         if ((base == null_obj) && (field->base_count() > 1)) {
2920           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2921           return;
2922         }
2923         // 2.5. An object is not scalar replaceable if the field into which it is
2924         // stored has NSR base.
2925         if (!base->scalar_replaceable()) {
2926           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2927           return;
2928         }
2929       }
2930     }
2931     assert(use->is_Field() || use->is_LocalVar(), "sanity");
2932     // 3. An object is not scalar replaceable if it is merged with other objects
2933     // and we can't remove the merge.
2934     for (EdgeIterator j(use); j.has_next(); j.next()) {
2935       PointsToNode* ptn = j.get();
2936       if (ptn->is_JavaObject() && ptn != jobj) {
2937         Node* use_n = use->ideal_node();
2938
2939         // These other local vars may point to multiple objects through a Phi.
2940         // In this case we skip them and see if we can reduce the Phi.
2941         if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
2942           use_n = use_n->in(1);
2943         }
2944
2945         // If it's already a candidate or confirmed reducible merge we can skip verification.
2946         if (candidates.member(use_n) || reducible_merges.member(use_n)) {
2947           continue;
2948         }
2949
2950         if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2951           candidates.push(use_n);
2952         } else {
2953           // Mark all objects as NSR if we can't remove the merge.
2954           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2955           set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2956         }
2957       }
2958     }
2959     if (!jobj->scalar_replaceable()) {
2960       return;
2961     }
2962   }
2963
2964   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2965     if (j.get()->is_Arraycopy()) {
2966       continue;
2967     }
2968
2969     // Non-escaping object node should point only to field nodes.
2970     FieldNode* field = j.get()->as_Field();
2971     int offset = field->as_Field()->offset();
2972
2973     // 4. An object is not scalar replaceable if it has a field with unknown
2974     // offset (array's element is accessed in loop).
2975     if (offset == Type::OffsetBot) {
2976       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2977       return;
2978     }
2979     // 5. Currently an object is not scalar replaceable if a LoadStore node
2980     // accesses its field since the field value is unknown after it.
2981     //
2982     Node* n = field->ideal_node();
2983
2984     // Test for an unsafe access that was parsed as maybe off heap
2985     // (with a CheckCastPP to raw memory).
2986     assert(n->is_AddP(), "expect an address computation");
2987     if (n->in(AddPNode::Base)->is_top() &&
2988         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2989       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2990       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2991       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2992       return;
2993     }
2994
2995     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2996       Node* u = n->fast_out(i);
2997       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2998         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2999         return;
3000       }
3001     }
3002
3003     // 6.
Or the address may point to more than one object. This may produce
3004     // a false positive result (set not scalar replaceable)
3005     // since the flow-insensitive escape analysis can't separate
3006     // the case when stores overwrite the field's value from the case
3007     // when stores happened on different control branches.
3008     //
3009     // Note: it will disable scalar replacement in some cases:
3010     //
3011     //    Point p[] = new Point[1];
3012     //    p[0] = new Point(); // Will not be scalar replaced
3013     //
3014     // but it will save us from incorrect optimizations in cases such as:
3015     //
3016     //    Point p[] = new Point[1];
3017     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
3018     //
3019     if (field->base_count() > 1 && candidates.size() == 0) {
3020       if (has_non_reducible_merge(field, reducible_merges)) {
3021         for (BaseIterator i(field); i.has_next(); i.next()) {
3022           PointsToNode* base = i.get();
3023           // Don't take into account LocalVar nodes which
3024           // may point to only one object which should also be
3025           // this field's base by now.
3026           if (base->is_JavaObject() && base != jobj) {
3027             // Mark all bases.
3028             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3029             set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3030           }
3031         }
3032
3033         if (!jobj->scalar_replaceable()) {
3034           return;
3035         }
3036       }
3037     }
3038   }
3039
3040   // The candidate is truly a reducible merge only if none of the other
3041   // constraints ruled it as NSR. There could be multiple merges involving the
3042   // same jobj.
3043   assert(jobj->scalar_replaceable(), "sanity");
3044   for (uint i = 0; i < candidates.size(); i++) {
3045     Node* candidate = candidates.at(i);
3046     reducible_merges.push(candidate);
3047   }
3048 }
3049
3050 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3051   for (BaseIterator i(field); i.has_next(); i.next()) {
3052     Node* base = i.get()->ideal_node();
3053     if (base->is_Phi() && !reducible_merges.member(base)) {
3054       return true;
3055     }
3056   }
3057   return false;
3058 }
3059
3060 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3061   assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3062
3063   // Look for 'phis' that refer to 'jobj' as the last
3064   // remaining scalar replaceable input.
3065   uint reducible_merges_cnt = reducible_merges.size();
3066   for (uint i = 0; i < reducible_merges_cnt; i++) {
3067     Node* phi = reducible_merges.at(i);
3068
3069     // This 'Phi' will be 'good' if it still points to
3070     // at least one scalar replaceable object. Note that 'jobj'
3071     // was/should be marked as NSR before calling this function.
3072     bool good_phi = false;
3073
3074     for (uint j = 1; j < phi->req(); j++) {
3075       JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3076       if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3077         good_phi = true;
3078         break;
3079       }
3080     }
3081
3082     if (!good_phi) {
3083       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3084       reducible_merges.remove(i);
3085
3086       // Decrement the index because the 'remove' call above actually
3087       // moves the last entry of the list to position 'i'.
3088 i--; 3089 3090 reducible_merges_cnt--; 3091 } 3092 } 3093 } 3094 3095 // Propagate NSR (Not scalar replaceable) state. 3096 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) { 3097 int jobj_length = jobj_worklist.length(); 3098 bool found_nsr_alloc = true; 3099 while (found_nsr_alloc) { 3100 found_nsr_alloc = false; 3101 for (int next = 0; next < jobj_length; ++next) { 3102 JavaObjectNode* jobj = jobj_worklist.at(next); 3103 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) { 3104 PointsToNode* use = i.get(); 3105 if (use->is_Field()) { 3106 FieldNode* field = use->as_Field(); 3107 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 3108 assert(field->offset() != Type::OffsetBot, "sanity"); 3109 for (BaseIterator i(field); i.has_next(); i.next()) { 3110 PointsToNode* base = i.get(); 3111 // An object is not scalar replaceable if the field into which 3112 // it is stored has NSR base. 3113 if ((base != null_obj) && !base->scalar_replaceable()) { 3114 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 3115 // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible, 3116 // because there is no point in reducing a Phi that won't improve the number of SR 3117 // objects. 3118 revisit_reducible_phi_status(jobj, reducible_merges); 3119 found_nsr_alloc = true; 3120 break; 3121 } 3122 } 3123 } 3124 } 3125 } 3126 } 3127 } 3128 3129 #ifdef ASSERT 3130 void ConnectionGraph::verify_connection_graph( 3131 GrowableArray<PointsToNode*>& ptnodes_worklist, 3132 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 3133 GrowableArray<JavaObjectNode*>& java_objects_worklist, 3134 GrowableArray<Node*>& addp_worklist) { 3135 // Verify that graph is complete - no new edges could be added. 3136 int java_objects_length = java_objects_worklist.length(); 3137 int non_escaped_length = non_escaped_allocs_worklist.length(); 3138 int new_edges = 0; 3139 for (int next = 0; next < java_objects_length; ++next) { 3140 JavaObjectNode* ptn = java_objects_worklist.at(next); 3141 new_edges += add_java_object_edges(ptn, true); 3142 } 3143 assert(new_edges == 0, "graph was not complete"); 3144 // Verify that escape state is final. 3145 int length = non_escaped_allocs_worklist.length(); 3146 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 3147 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 3148 (non_escaped_length == length) && 3149 (_worklist.length() == 0), "escape state was not final"); 3150 3151 // Verify fields information. 3152 int addp_length = addp_worklist.length(); 3153 for (int next = 0; next < addp_length; ++next ) { 3154 Node* n = addp_worklist.at(next); 3155 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 3156 if (field->is_oop()) { 3157 // Verify that field has all bases 3158 Node* base = get_addp_base(n); 3159 PointsToNode* ptn = ptnode_adr(base->_idx); 3160 if (ptn->is_JavaObject()) { 3161 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 3162 } else { 3163 assert(ptn->is_LocalVar(), "sanity"); 3164 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3165 PointsToNode* e = i.get(); 3166 if (e->is_JavaObject()) { 3167 assert(field->has_base(e->as_JavaObject()), "sanity"); 3168 } 3169 } 3170 } 3171 // Verify that all fields have initializing values. 
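// (Editorial note: every oop field of a tracked object is expected to have
// received at least one value edge during graph construction -- from a
// captured store, the default null of its Initialize, or an arraycopy --
// so an empty edge set here points to a hole in construction, not to
// valid input. The dumps below are purely diagnostic.)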
3172 if (field->edge_count() == 0) { 3173 tty->print_cr("----------field does not have references----------"); 3174 field->dump(); 3175 for (BaseIterator i(field); i.has_next(); i.next()) { 3176 PointsToNode* base = i.get(); 3177 tty->print_cr("----------field has next base---------------------"); 3178 base->dump(); 3179 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 3180 tty->print_cr("----------base has fields-------------------------"); 3181 for (EdgeIterator j(base); j.has_next(); j.next()) { 3182 j.get()->dump(); 3183 } 3184 tty->print_cr("----------base has references---------------------"); 3185 for (UseIterator j(base); j.has_next(); j.next()) { 3186 j.get()->dump(); 3187 } 3188 } 3189 } 3190 for (UseIterator i(field); i.has_next(); i.next()) { 3191 i.get()->dump(); 3192 } 3193 assert(field->edge_count() > 0, "sanity"); 3194 } 3195 } 3196 } 3197 } 3198 #endif 3199 3200 // Optimize ideal graph. 3201 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 3202 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 3203 Compile* C = _compile; 3204 PhaseIterGVN* igvn = _igvn; 3205 if (EliminateLocks) { 3206 // Mark locks before changing ideal graph. 3207 int cnt = C->macro_count(); 3208 for (int i = 0; i < cnt; i++) { 3209 Node *n = C->macro_node(i); 3210 if (n->is_AbstractLock()) { // Lock and Unlock nodes 3211 AbstractLockNode* alock = n->as_AbstractLock(); 3212 if (!alock->is_non_esc_obj()) { 3213 if (can_eliminate_lock(alock)) { 3214 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 3215 // The lock could be marked eliminated by lock coarsening 3216 // code during first IGVN before EA. Replace coarsened flag 3217 // to eliminate all associated locks/unlocks. 3218 #ifdef ASSERT 3219 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 3220 #endif 3221 alock->set_non_esc_obj(); 3222 } 3223 } 3224 } 3225 } 3226 } 3227 3228 if (OptimizePtrCompare) { 3229 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 3230 Node *n = ptr_cmp_worklist.at(i); 3231 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 3232 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2)); 3233 if (tcmp->singleton()) { 3234 Node* cmp = igvn->makecon(tcmp); 3235 #ifndef PRODUCT 3236 if (PrintOptimizePtrCompare) { 3237 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 3238 if (Verbose) { 3239 n->dump(1); 3240 } 3241 } 3242 #endif 3243 igvn->replace_node(n, cmp); 3244 } 3245 } 3246 } 3247 3248 // For MemBarStoreStore nodes added in library_call.cpp, check 3249 // escape status of associated AllocateNode and optimize out 3250 // MemBarStoreStore node if the allocated object never escapes. 3251 for (int i = 0; i < storestore_worklist.length(); i++) { 3252 Node* storestore = storestore_worklist.at(i); 3253 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 3254 if (alloc->is_Allocate() && not_global_escape(alloc)) { 3255 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 3256 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 3257 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 3258 igvn->register_new_node_with_optimizer(mb); 3259 igvn->replace_node(storestore, mb); 3260 } 3261 } 3262 } 3263 3264 // Optimize objects compare. 
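// Editorial note (an illustrative sketch, not code from this file): the kind
// of comparison this routine can fold is
//
//   Object a = new Object();    // allocation that never escapes
//   boolean same = (a == b);    // 'b' can never point to 'a'
//
// Here the CmpP folds to "not equal": no other reference can alias a
// non-escaping allocation, and the same reasoning covers 'a == null'.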
3265 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3266 assert(OptimizePtrCompare, "sanity");
3267 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3268 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3269 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
3270
3271 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3272 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3273 JavaObjectNode* jobj1 = unique_java_object(left);
3274 JavaObjectNode* jobj2 = unique_java_object(right);
3275
3276 // The use of this method during allocation merge reduction may cause 'left'
3277 // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3278 // or that doesn't reference a unique java object.
3279 if (ptn1 == nullptr || ptn2 == nullptr ||
3280 jobj1 == nullptr || jobj2 == nullptr) {
3281 return UNKNOWN;
3282 }
3283
3284 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3285 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3286
3287 // Check simple cases first.
3288 if (jobj1 != nullptr) {
3289 if (jobj1->escape_state() == PointsToNode::NoEscape) {
3290 if (jobj1 == jobj2) {
3291 // Comparing the same not escaping object.
3292 return EQ;
3293 }
3294 Node* obj = jobj1->ideal_node();
3295 // Comparing not escaping allocation.
3296 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3297 !ptn2->points_to(jobj1)) {
3298 return NE; // This includes nullness check.
3299 }
3300 }
3301 }
3302 if (jobj2 != nullptr) {
3303 if (jobj2->escape_state() == PointsToNode::NoEscape) {
3304 Node* obj = jobj2->ideal_node();
3305 // Comparing not escaping allocation.
3306 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3307 !ptn1->points_to(jobj2)) {
3308 return NE; // This includes nullness check.
3309 }
3310 }
3311 }
3312 if (jobj1 != nullptr && jobj1 != phantom_obj &&
3313 jobj2 != nullptr && jobj2 != phantom_obj &&
3314 jobj1->ideal_node()->is_Con() &&
3315 jobj2->ideal_node()->is_Con()) {
3316 // Klass or String constants compare. Need to be careful with
3317 // compressed pointers - compare types of ConN and ConP instead of nodes.
3318 const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3319 const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3320 if (t1->make_ptr() == t2->make_ptr()) {
3321 return EQ;
3322 } else {
3323 return NE;
3324 }
3325 }
3326 if (ptn1->meet(ptn2)) {
3327 return UNKNOWN; // Sets are not disjoint
3328 }
3329
3330 // Sets are disjoint.
3331 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3332 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3333 bool set1_has_null_ptr = ptn1->points_to(null_obj);
3334 bool set2_has_null_ptr = ptn2->points_to(null_obj);
3335 if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3336 (set2_has_unknown_ptr && set1_has_null_ptr)) {
3337 // Check nullness of unknown object.
3338 return UNKNOWN;
3339 }
3340
3341 // Disjointness by itself is not sufficient since
3342 // alias analysis is not complete for escaped objects.
3343 // Disjoint sets are definitely unrelated only when
3344 // at least one set has only not escaping allocations.
3345 if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3346 if (ptn1->non_escaping_allocation()) {
3347 return NE;
3348 }
3349 }
3350 if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3351 if (ptn2->non_escaping_allocation()) {
3352 return NE;
3353 }
3354 }
3355 return UNKNOWN;
3356 }
3357
3358 // Connection Graph construction functions.
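// (Editorial note: the add_* helpers below share one idempotent pattern --
// look up _nodes[n->_idx]; if a PointsTo node of the expected kind already
// exists, just return, otherwise allocate a new node in the compile arena
// and record the mapping with map_ideal_node().)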
3359 3360 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 3361 PointsToNode* ptadr = _nodes.at(n->_idx); 3362 if (ptadr != nullptr) { 3363 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 3364 return; 3365 } 3366 Compile* C = _compile; 3367 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 3368 map_ideal_node(n, ptadr); 3369 } 3370 3371 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 3372 PointsToNode* ptadr = _nodes.at(n->_idx); 3373 if (ptadr != nullptr) { 3374 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 3375 return ptadr; 3376 } 3377 Compile* C = _compile; 3378 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 3379 map_ideal_node(n, ptadr); 3380 return ptadr; 3381 } 3382 3383 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 3384 PointsToNode* ptadr = _nodes.at(n->_idx); 3385 if (ptadr != nullptr) { 3386 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 3387 return; 3388 } 3389 bool unsafe = false; 3390 bool is_oop = is_oop_field(n, offset, &unsafe); 3391 if (unsafe) { 3392 es = PointsToNode::GlobalEscape; 3393 } 3394 Compile* C = _compile; 3395 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 3396 map_ideal_node(n, field); 3397 } 3398 3399 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 3400 PointsToNode* src, PointsToNode* dst) { 3401 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 3402 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 3403 PointsToNode* ptadr = _nodes.at(n->_idx); 3404 if (ptadr != nullptr) { 3405 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 3406 return; 3407 } 3408 Compile* C = _compile; 3409 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 3410 map_ideal_node(n, ptadr); 3411 // Add edge from arraycopy node to source object. 3412 (void)add_edge(ptadr, src); 3413 src->set_arraycopy_src(); 3414 // Add edge from destination object to arraycopy node. 3415 (void)add_edge(dst, ptadr); 3416 dst->set_arraycopy_dst(); 3417 } 3418 3419 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 3420 const Type* adr_type = n->as_AddP()->bottom_type(); 3421 BasicType bt = T_INT; 3422 if (offset == Type::OffsetBot) { 3423 // Check only oop fields. 3424 if (!adr_type->isa_aryptr() || 3425 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 3426 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 3427 // OffsetBot is used to reference array's element. Ignore first AddP. 
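// (Editorial note: with OffsetBot the element address is usually built from
// two chained AddPs -- see the diagram in find_second_addp(). When this AddP
// is the first of such a pair it is not itself the element reference, which
// is why it is skipped below.)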
3428 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 3429 bt = T_OBJECT; 3430 } 3431 } 3432 } else if (offset != Type::klass_offset()) { 3433 if (adr_type->isa_instptr()) { 3434 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 3435 if (field != nullptr) { 3436 bt = field->layout_type(); 3437 } else { 3438 // Check for unsafe oop field access 3439 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3440 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3441 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3442 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3443 bt = T_OBJECT; 3444 (*unsafe) = true; 3445 } 3446 } 3447 } else if (adr_type->isa_aryptr()) { 3448 if (offset == arrayOopDesc::length_offset_in_bytes()) { 3449 // Ignore array length load. 3450 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 3451 // Ignore first AddP. 3452 } else { 3453 const Type* elemtype = adr_type->isa_aryptr()->elem(); 3454 bt = elemtype->array_element_basic_type(); 3455 } 3456 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 3457 // Allocation initialization, ThreadLocal field access, unsafe access 3458 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3459 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3460 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3461 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3462 bt = T_OBJECT; 3463 } 3464 } 3465 } 3466 // Note: T_NARROWOOP is not classed as a real reference type 3467 return (is_reference_type(bt) || bt == T_NARROWOOP); 3468 } 3469 3470 // Returns unique pointed java object or null. 3471 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 3472 // If the node was created after the escape computation we can't answer. 3473 uint idx = n->_idx; 3474 if (idx >= nodes_size()) { 3475 return nullptr; 3476 } 3477 PointsToNode* ptn = ptnode_adr(idx); 3478 if (ptn == nullptr) { 3479 return nullptr; 3480 } 3481 if (ptn->is_JavaObject()) { 3482 return ptn->as_JavaObject(); 3483 } 3484 assert(ptn->is_LocalVar(), "sanity"); 3485 // Check all java objects it points to. 3486 JavaObjectNode* jobj = nullptr; 3487 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3488 PointsToNode* e = i.get(); 3489 if (e->is_JavaObject()) { 3490 if (jobj == nullptr) { 3491 jobj = e->as_JavaObject(); 3492 } else if (jobj != e) { 3493 return nullptr; 3494 } 3495 } 3496 } 3497 return jobj; 3498 } 3499 3500 // Return true if this node points only to non-escaping allocations. 3501 bool PointsToNode::non_escaping_allocation() { 3502 if (is_JavaObject()) { 3503 Node* n = ideal_node(); 3504 if (n->is_Allocate() || n->is_CallStaticJava()) { 3505 return (escape_state() == PointsToNode::NoEscape); 3506 } else { 3507 return false; 3508 } 3509 } 3510 assert(is_LocalVar(), "sanity"); 3511 // Check all java objects it points to. 
3512 for (EdgeIterator i(this); i.has_next(); i.next()) {
3513 PointsToNode* e = i.get();
3514 if (e->is_JavaObject()) {
3515 Node* n = e->ideal_node();
3516 if ((e->escape_state() != PointsToNode::NoEscape) ||
3517 !(n->is_Allocate() || n->is_CallStaticJava())) {
3518 return false;
3519 }
3520 }
3521 }
3522 return true;
3523 }
3524
3525 // Return true if we know the node does not escape globally.
3526 bool ConnectionGraph::not_global_escape(Node *n) {
3527 assert(!_collecting, "should not call during graph construction");
3528 // If the node was created after the escape computation we can't answer.
3529 uint idx = n->_idx;
3530 if (idx >= nodes_size()) {
3531 return false;
3532 }
3533 PointsToNode* ptn = ptnode_adr(idx);
3534 if (ptn == nullptr) {
3535 return false; // not in congraph (e.g. ConI)
3536 }
3537 PointsToNode::EscapeState es = ptn->escape_state();
3538 // If we have already computed a value, return it.
3539 if (es >= PointsToNode::GlobalEscape) {
3540 return false;
3541 }
3542 if (ptn->is_JavaObject()) {
3543 return true; // (es < PointsToNode::GlobalEscape);
3544 }
3545 assert(ptn->is_LocalVar(), "sanity");
3546 // Check all java objects it points to.
3547 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3548 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3549 return false;
3550 }
3551 }
3552 return true;
3553 }
3554
3555 // Return true if the locked object does not escape globally
3556 // and the locked code region (identified by BoxLockNode) is balanced:
3557 // all compiled code paths have corresponding Lock/Unlock pairs.
3558 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3559 if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3560 if (EliminateNestedLocks) {
3561 // We can mark the whole locking region as Local only when a single
3562 // object is used for locking.
3563 alock->box_node()->as_BoxLock()->set_local();
3564 }
3565 return true;
3566 }
3567 return false;
3568 }
3569
3570 // Helper functions
3571
3572 // Return true if this node points to the specified node or to nodes it points to.
3573 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3574 if (is_JavaObject()) {
3575 return (this == ptn);
3576 }
3577 assert(is_LocalVar() || is_Field(), "sanity");
3578 for (EdgeIterator i(this); i.has_next(); i.next()) {
3579 if (i.get() == ptn) {
3580 return true;
3581 }
3582 }
3583 return false;
3584 }
3585
3586 // Return true if one node points to the other.
3587 bool PointsToNode::meet(PointsToNode* ptn) {
3588 if (this == ptn) {
3589 return true;
3590 } else if (ptn->is_JavaObject()) {
3591 return this->points_to(ptn->as_JavaObject());
3592 } else if (this->is_JavaObject()) {
3593 return ptn->points_to(this->as_JavaObject());
3594 }
3595 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3596 int ptn_count = ptn->edge_count();
3597 for (EdgeIterator i(this); i.has_next(); i.next()) {
3598 PointsToNode* this_e = i.get();
3599 for (int j = 0; j < ptn_count; j++) {
3600 if (this_e == ptn->edge(j)) {
3601 return true;
3602 }
3603 }
3604 }
3605 return false;
3606 }
3607
3608 #ifdef ASSERT
3609 // Return true if bases point to this java object.
3610 bool FieldNode::has_base(JavaObjectNode* jobj) const {
3611 for (BaseIterator i(this); i.has_next(); i.next()) {
3612 if (i.get() == jobj) {
3613 return true;
3614 }
3615 }
3616 return false;
3617 }
3618 #endif
3619
3620 bool ConnectionGraph::is_captured_store_address(Node* addp) {
3621 // Handle simple case first.
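// (Editorial note: the simple case below is a raw store straight into a
// freshly allocated object -- the address is the raw result projection of
// an Allocate, i.e. AddP cases #3/#5 in get_addp_base() further down.)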
3622 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 3623 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 3624 return true; 3625 } else if (addp->in(AddPNode::Address)->is_Phi()) { 3626 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 3627 Node* addp_use = addp->fast_out(i); 3628 if (addp_use->is_Store()) { 3629 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 3630 if (addp_use->fast_out(j)->is_Initialize()) { 3631 return true; 3632 } 3633 } 3634 } 3635 } 3636 } 3637 return false; 3638 } 3639 3640 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 3641 const Type *adr_type = phase->type(adr); 3642 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 3643 // We are computing a raw address for a store captured by an Initialize 3644 // compute an appropriate address type. AddP cases #3 and #5 (see below). 3645 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 3646 assert(offs != Type::OffsetBot || 3647 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 3648 "offset must be a constant or it is initialization of array"); 3649 return offs; 3650 } 3651 const TypePtr *t_ptr = adr_type->isa_ptr(); 3652 assert(t_ptr != nullptr, "must be a pointer type"); 3653 return t_ptr->offset(); 3654 } 3655 3656 Node* ConnectionGraph::get_addp_base(Node *addp) { 3657 assert(addp->is_AddP(), "must be AddP"); 3658 // 3659 // AddP cases for Base and Address inputs: 3660 // case #1. Direct object's field reference: 3661 // Allocate 3662 // | 3663 // Proj #5 ( oop result ) 3664 // | 3665 // CheckCastPP (cast to instance type) 3666 // | | 3667 // AddP ( base == address ) 3668 // 3669 // case #2. Indirect object's field reference: 3670 // Phi 3671 // | 3672 // CastPP (cast to instance type) 3673 // | | 3674 // AddP ( base == address ) 3675 // 3676 // case #3. Raw object's field reference for Initialize node: 3677 // Allocate 3678 // | 3679 // Proj #5 ( oop result ) 3680 // top | 3681 // \ | 3682 // AddP ( base == top ) 3683 // 3684 // case #4. Array's element reference: 3685 // {CheckCastPP | CastPP} 3686 // | | | 3687 // | AddP ( array's element offset ) 3688 // | | 3689 // AddP ( array's offset ) 3690 // 3691 // case #5. Raw object's field reference for arraycopy stub call: 3692 // The inline_native_clone() case when the arraycopy stub is called 3693 // after the allocation before Initialize and CheckCastPP nodes. 3694 // Allocate 3695 // | 3696 // Proj #5 ( oop result ) 3697 // | | 3698 // AddP ( base == address ) 3699 // 3700 // case #6. Constant Pool, ThreadLocal, CastX2P or 3701 // Raw object's field reference: 3702 // {ConP, ThreadLocal, CastX2P, raw Load} 3703 // top | 3704 // \ | 3705 // AddP ( base == top ) 3706 // 3707 // case #7. Klass's field reference. 3708 // LoadKlass 3709 // | | 3710 // AddP ( base == address ) 3711 // 3712 // case #8. narrow Klass's field reference. 3713 // LoadNKlass 3714 // | 3715 // DecodeN 3716 // | | 3717 // AddP ( base == address ) 3718 // 3719 // case #9. Mixed unsafe access 3720 // {instance} 3721 // | 3722 // CheckCastPP (raw) 3723 // top | 3724 // \ | 3725 // AddP ( base == top ) 3726 // 3727 Node *base = addp->in(AddPNode::Base); 3728 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 3729 base = addp->in(AddPNode::Address); 3730 while (base->is_AddP()) { 3731 // Case #6 (unsafe access) may have several chained AddP nodes. 
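// (Editorial note -- a hypothetical shape only: a raw access such as
// Unsafe.getLong(addr + BASE + IDX) can parse as
//   AddP(top, AddP(top, CastX2P(addr), #BASE), #IDX)
// so AddPs are stripped until a non-AddP address is reached.)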
3732 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3733 base = base->in(AddPNode::Address);
3734 }
3735 if (base->Opcode() == Op_CheckCastPP &&
3736 base->bottom_type()->isa_rawptr() &&
3737 _igvn->type(base->in(1))->isa_oopptr()) {
3738 base = base->in(1); // Case #9
3739 } else {
3740 Node* uncast_base = base->uncast();
3741 int opcode = uncast_base->Opcode();
3742 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3743 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3744 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3745 is_captured_store_address(addp), "sanity");
3746 }
3747 }
3748 return base;
3749 }
3750
3751 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3752 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3753 Node* addp2 = addp->raw_out(0);
3754 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3755 addp2->in(AddPNode::Base) == n &&
3756 addp2->in(AddPNode::Address) == addp) {
3757 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3758 //
3759 // Find the array's offset AddP to push it on the worklist first, so that
3760 // the array's element offset AddP (pushed second) is processed first. This
3761 // avoids inserting a CastPP for the array's offset.
3762 // Otherwise the inserted CastPP (LocalVar) would point to what
3763 // the AddP (Field) points to, which would be wrong since
3764 // the algorithm expects the CastPP to point to the same object
3765 // as AddP's base CheckCastPP (LocalVar).
3766 //
3767 // ArrayAllocation
3768 // |
3769 // CheckCastPP
3770 // |
3771 // memProj (from ArrayAllocation CheckCastPP)
3772 // | ||
3773 // | || Int (element index)
3774 // | || | ConI (log(element size))
3775 // | || | /
3776 // | || LShift
3777 // | || /
3778 // | AddP (array's element offset)
3779 // | |
3780 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
3781 // | / /
3782 // AddP (array's offset)
3783 // |
3784 // Load/Store (memory operation on array's element)
3785 //
3786 return addp2;
3787 }
3788 return nullptr;
3789 }
3790
3791 //
3792 // Adjust the type and inputs of an AddP which computes the
3793 // address of a field of an instance.
3794 //
3795 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3796 PhaseGVN* igvn = _igvn;
3797 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3798 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3799 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3800 if (t == nullptr) {
3801 // We are computing a raw address for a store captured by an Initialize;
3802 // compute an appropriate address type (cases #3 and #5).
3803 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3804 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3805 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3806 assert(offs != Type::OffsetBot, "offset must be a constant");
3807 t = base_t->add_offset(offs)->is_oopptr();
3808 }
3809 int inst_id = base_t->instance_id();
3810 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3811 "old type must be non-instance or match new type");
3812
3813 // The type 't' could be a subclass of 'base_t'.
3814 // As a result t->offset() could be larger than base_t's size, which would
3815 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3816 // constructor verifies correctness of the offset.
3817 //
3818 // It can happen on a subclass's branch (from the type profiling
3819 // inlining) which was not eliminated during parsing since the exactness
3820 // of the allocation type was not propagated to the subclass type check.
3821 //
3822 // Or the type 't' might not be related to 'base_t' at all.
3823 // It can happen when the CHA type differs from the MDO type on a dead path
3824 // (for example, from an instanceof check) which is not collapsed during parsing.
3825 //
3826 // Do nothing for such an AddP node and don't process its users since
3827 // this code branch will go away.
3828 //
3829 if (!t->is_known_instance() &&
3830 !base_t->maybe_java_subtype_of(t)) {
3831 return false; // bail out
3832 }
3833 const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3834 // Do NOT remove the next line: ensure a new alias index is allocated
3835 // for the instance type. Note: C++ will not remove it since the call
3836 // has side effects.
3837 int alias_idx = _compile->get_alias_index(tinst);
3838 igvn->set_type(addp, tinst);
3839 // record the allocation in the node map
3840 set_map(addp, get_map(base->_idx));
3841 // Set addp's Base and Address to 'base'.
3842 Node *abase = addp->in(AddPNode::Base);
3843 Node *adr = addp->in(AddPNode::Address);
3844 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3845 adr->in(0)->_idx == (uint)inst_id) {
3846 // Skip AddP cases #3 and #5.
3847 } else {
3848 assert(!abase->is_top(), "sanity"); // AddP case #3
3849 if (abase != base) {
3850 igvn->hash_delete(addp);
3851 addp->set_req(AddPNode::Base, base);
3852 if (abase == adr) {
3853 addp->set_req(AddPNode::Address, base);
3854 } else {
3855 // AddP case #4 (adr is array's element offset AddP node)
3856 #ifdef ASSERT
3857 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3858 assert(adr->is_AddP() && atype != nullptr &&
3859 atype->instance_id() == inst_id, "array's element offset should be processed first");
3860 #endif
3861 }
3862 igvn->hash_insert(addp);
3863 }
3864 }
3865 // Put on the IGVN worklist since at least addp's type was changed above.
3866 record_for_optimizer(addp);
3867 return true;
3868 }
3869
3870 //
3871 // Create a new version of orig_phi if necessary. Returns either the newly
3872 // created phi or an existing phi. Sets 'new_created' to indicate whether a new
3873 // phi was created. Cache the last newly created phi in the node map.
3874 //
3875 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
3876 Compile *C = _compile;
3877 PhaseGVN* igvn = _igvn;
3878 new_created = false;
3879 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3880 // nothing to do if orig_phi is bottom memory or matches alias_idx
3881 if (phi_alias_idx == alias_idx) {
3882 return orig_phi;
3883 }
3884 // Have we recently created a Phi for this alias index?
3885 PhiNode *result = get_map_phi(orig_phi->_idx);
3886 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3887 return result;
3888 }
3889 // The previous check may fail when the same wide memory Phi was split into
3890 // Phis for different memory slices. Search all Phis for this region.
3891 if (result != nullptr) {
3892 Node* region = orig_phi->in(0);
3893 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3894 Node* phi = region->fast_out(i);
3895 if (phi->is_Phi() &&
3896 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3897 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3898 return phi->as_Phi();
3899 }
3900 }
3901 }
3902 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3903 if (C->do_escape_analysis() == true && !C->failing()) {
3904 // Retry compilation without escape analysis.
3905 // If this is the first failure, the sentinel string will "stick"
3906 // to the Compile object, and the C2Compiler will see it and retry.
3907 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3908 }
3909 return nullptr;
3910 }
3911 orig_phi_worklist.append_if_missing(orig_phi);
3912 const TypePtr *atype = C->get_adr_type(alias_idx);
3913 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3914 C->copy_node_notes_to(result, orig_phi);
3915 igvn->set_type(result, result->bottom_type());
3916 record_for_optimizer(result);
3917 set_map(orig_phi, result);
3918 new_created = true;
3919 return result;
3920 }
3921
3922 //
3923 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3924 // specified alias index.
3925 //
3926 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
3927 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3928 Compile *C = _compile;
3929 PhaseGVN* igvn = _igvn;
3930 bool new_phi_created;
3931 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3932 if (!new_phi_created) {
3933 return result;
3934 }
3935 GrowableArray<PhiNode *> phi_list;
3936 GrowableArray<uint> cur_input;
3937 PhiNode *phi = orig_phi;
3938 uint idx = 1;
3939 bool finished = false;
3940 while (!finished) {
3941 while (idx < phi->req()) {
3942 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
3943 if (mem != nullptr && mem->is_Phi()) {
3944 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3945 if (new_phi_created) {
3946 // Found a phi for which we created a new split; push the current one
3947 // on the worklist and begin processing the new one.
3948 phi_list.push(phi);
3949 cur_input.push(idx);
3950 phi = mem->as_Phi();
3951 result = newphi;
3952 idx = 1;
3953 continue;
3954 } else {
3955 mem = newphi;
3956 }
3957 }
3958 if (C->failing()) {
3959 return nullptr;
3960 }
3961 result->set_req(idx++, mem);
3962 }
3963 #ifdef ASSERT
3964 // verify that the new Phi has an input for each input of the original
3965 assert( phi->req() == result->req(), "must have same number of inputs.");
3966 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3967 #endif
3968 // Check if all of the new phi's inputs have the specified alias index.
3969 // Otherwise use the old phi.
3970 for (uint i = 1; i < phi->req(); i++) { 3971 Node* in = result->in(i); 3972 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond."); 3973 } 3974 // we have finished processing a Phi, see if there are any more to do 3975 finished = (phi_list.length() == 0 ); 3976 if (!finished) { 3977 phi = phi_list.pop(); 3978 idx = cur_input.pop(); 3979 PhiNode *prev_result = get_map_phi(phi->_idx); 3980 prev_result->set_req(idx++, result); 3981 result = prev_result; 3982 } 3983 } 3984 return result; 3985 } 3986 3987 // 3988 // The next methods are derived from methods in MemNode. 3989 // 3990 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 3991 Node *mem = mmem; 3992 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 3993 // means an array I have not precisely typed yet. Do not do any 3994 // alias stuff with it any time soon. 3995 if (toop->base() != Type::AnyPtr && 3996 !(toop->isa_instptr() && 3997 toop->is_instptr()->instance_klass()->is_java_lang_Object() && 3998 toop->offset() == Type::OffsetBot)) { 3999 mem = mmem->memory_at(alias_idx); 4000 // Update input if it is progress over what we have now 4001 } 4002 return mem; 4003 } 4004 4005 // 4006 // Move memory users to their memory slices. 4007 // 4008 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 4009 Compile* C = _compile; 4010 PhaseGVN* igvn = _igvn; 4011 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 4012 assert(tp != nullptr, "ptr type"); 4013 int alias_idx = C->get_alias_index(tp); 4014 int general_idx = C->get_general_index(alias_idx); 4015 4016 // Move users first 4017 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4018 Node* use = n->fast_out(i); 4019 if (use->is_MergeMem()) { 4020 MergeMemNode* mmem = use->as_MergeMem(); 4021 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 4022 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 4023 continue; // Nothing to do 4024 } 4025 // Replace previous general reference to mem node. 4026 uint orig_uniq = C->unique(); 4027 Node* m = find_inst_mem(n, general_idx, orig_phis); 4028 assert(orig_uniq == C->unique(), "no new nodes"); 4029 mmem->set_memory_at(general_idx, m); 4030 --imax; 4031 --i; 4032 } else if (use->is_MemBar()) { 4033 assert(!use->is_Initialize(), "initializing stores should not be moved"); 4034 if (use->req() > MemBarNode::Precedent && 4035 use->in(MemBarNode::Precedent) == n) { 4036 // Don't move related membars. 4037 record_for_optimizer(use); 4038 continue; 4039 } 4040 tp = use->as_MemBar()->adr_type()->isa_ptr(); 4041 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 4042 alias_idx == general_idx) { 4043 continue; // Nothing to do 4044 } 4045 // Move to general memory slice. 4046 uint orig_uniq = C->unique(); 4047 Node* m = find_inst_mem(n, general_idx, orig_phis); 4048 assert(orig_uniq == C->unique(), "no new nodes"); 4049 igvn->hash_delete(use); 4050 imax -= use->replace_edge(n, m, igvn); 4051 igvn->hash_insert(use); 4052 record_for_optimizer(use); 4053 --i; 4054 #ifdef ASSERT 4055 } else if (use->is_Mem()) { 4056 // Memory nodes should have new memory input. 
4057 tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4058 assert(tp != nullptr, "ptr type");
4059 int idx = C->get_alias_index(tp);
4060 assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4061 "Following memory nodes should have new memory input or be on the same memory slice");
4062 } else if (use->is_Phi()) {
4063 // Phi nodes should be split and moved already.
4064 tp = use->as_Phi()->adr_type()->isa_ptr();
4065 assert(tp != nullptr, "ptr type");
4066 int idx = C->get_alias_index(tp);
4067 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4068 } else {
4069 use->dump();
4070 assert(false, "should not be here");
4071 #endif
4072 }
4073 }
4074 }
4075
4076 //
4077 // Search the memory chain of "mem" to find a MemNode whose address
4078 // has the specified alias index.
4079 //
4080 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
4081 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, uint rec_depth) {
4082 if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4083 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4084 return nullptr;
4085 }
4086 if (orig_mem == nullptr) {
4087 return orig_mem;
4088 }
4089 Compile* C = _compile;
4090 PhaseGVN* igvn = _igvn;
4091 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4092 bool is_instance = (toop != nullptr) && toop->is_known_instance();
4093 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4094 Node *prev = nullptr;
4095 Node *result = orig_mem;
4096 while (prev != result) {
4097 prev = result;
4098 if (result == start_mem) {
4099 break; // hit one of our sentinels
4100 }
4101 if (result->is_Mem()) {
4102 const Type *at = igvn->type(result->in(MemNode::Address));
4103 if (at == Type::TOP) {
4104 break; // Dead
4105 }
4106 assert (at->isa_ptr() != nullptr, "pointer type required.");
4107 int idx = C->get_alias_index(at->is_ptr());
4108 if (idx == alias_idx) {
4109 break; // Found
4110 }
4111 if (!is_instance && (at->isa_oopptr() == nullptr ||
4112 !at->is_oopptr()->is_known_instance())) {
4113 break; // Do not skip store to general memory slice.
4114 }
4115 result = result->in(MemNode::Memory);
4116 }
4117 if (!is_instance) {
4118 continue; // don't search further for non-instance types
4119 }
4120 // skip over a call which does not affect this memory slice
4121 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4122 Node *proj_in = result->in(0);
4123 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4124 break; // hit one of our sentinels
4125 } else if (proj_in->is_Call()) {
4126 // ArrayCopy nodes are processed here as well.
4127 CallNode *call = proj_in->as_Call();
4128 if (!call->may_modify(toop, igvn)) {
4129 result = call->in(TypeFunc::Memory);
4130 }
4131 } else if (proj_in->is_Initialize()) {
4132 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4133 // Stop if this is the initialization for the object instance which
4134 // contains this memory slice, otherwise skip over it.
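// (Editorial note: only the Initialize of the very allocation that created
// this known instance can define the slice's initial field values; the
// initialization of any other object cannot alias the slice, so it is
// safe to step over its memory projection.)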
4135 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { 4136 result = proj_in->in(TypeFunc::Memory); 4137 } 4138 } else if (proj_in->is_MemBar()) { 4139 // Check if there is an array copy for a clone 4140 // Step over GC barrier when ReduceInitialCardMarks is disabled 4141 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4142 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 4143 4144 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 4145 // Stop if it is a clone 4146 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 4147 if (ac->may_modify(toop, igvn)) { 4148 break; 4149 } 4150 } 4151 result = proj_in->in(TypeFunc::Memory); 4152 } 4153 } else if (result->is_MergeMem()) { 4154 MergeMemNode *mmem = result->as_MergeMem(); 4155 result = step_through_mergemem(mmem, alias_idx, toop); 4156 if (result == mmem->base_memory()) { 4157 // Didn't find instance memory, search through general slice recursively. 4158 result = mmem->memory_at(C->get_general_index(alias_idx)); 4159 result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1); 4160 if (C->failing()) { 4161 return nullptr; 4162 } 4163 mmem->set_memory_at(alias_idx, result); 4164 } 4165 } else if (result->is_Phi() && 4166 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 4167 Node *un = result->as_Phi()->unique_input(igvn); 4168 if (un != nullptr) { 4169 orig_phis.append_if_missing(result->as_Phi()); 4170 result = un; 4171 } else { 4172 break; 4173 } 4174 } else if (result->is_ClearArray()) { 4175 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 4176 // Can not bypass initialization of the instance 4177 // we are looking for. 4178 break; 4179 } 4180 // Otherwise skip it (the call updated 'result' value). 
4181 } else if (result->Opcode() == Op_SCMemProj) {
4182 Node* mem = result->in(0);
4183 Node* adr = nullptr;
4184 if (mem->is_LoadStore()) {
4185 adr = mem->in(MemNode::Address);
4186 } else {
4187 assert(mem->Opcode() == Op_EncodeISOArray ||
4188 mem->Opcode() == Op_StrCompressedCopy, "sanity");
4189 adr = mem->in(3); // Memory edge corresponds to destination array
4190 }
4191 const Type *at = igvn->type(adr);
4192 if (at != Type::TOP) {
4193 assert(at->isa_ptr() != nullptr, "pointer type required.");
4194 int idx = C->get_alias_index(at->is_ptr());
4195 if (idx == alias_idx) {
4196 // Assert in debug mode
4197 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4198 break; // In product mode return SCMemProj node
4199 }
4200 }
4201 result = mem->in(MemNode::Memory);
4202 } else if (result->Opcode() == Op_StrInflatedCopy) {
4203 Node* adr = result->in(3); // Memory edge corresponds to destination array
4204 const Type *at = igvn->type(adr);
4205 if (at != Type::TOP) {
4206 assert(at->isa_ptr() != nullptr, "pointer type required.");
4207 int idx = C->get_alias_index(at->is_ptr());
4208 if (idx == alias_idx) {
4209 // Assert in debug mode
4210 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4211 break; // In product mode return SCMemProj node
4212 }
4213 }
4214 result = result->in(MemNode::Memory);
4215 }
4216 }
4217 if (result->is_Phi()) {
4218 PhiNode *mphi = result->as_Phi();
4219 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4220 const TypePtr *t = mphi->adr_type();
4221 if (!is_instance) {
4222 // Push all non-instance Phis on the orig_phis worklist to update inputs
4223 // during Phase 4 if needed.
4224 orig_phis.append_if_missing(mphi);
4225 } else if (C->get_alias_index(t) != alias_idx) {
4226 // Create a new Phi with the specified alias index type.
4227 result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4228 }
4229 }
4230 // The result is either a MemNode, a PhiNode or an InitializeNode.
4231 return result;
4232 }
4233
4234 //
4235 // Convert the types of non-escaped objects to instance types where possible,
4236 // propagate the new type information through the graph, and update memory
4237 // edges and MergeMem inputs to reflect the new type.
4238 //
4239 // We start with allocations (and calls which may be allocations) on alloc_worklist.
4240 // The processing is done in 4 phases:
4241 //
4242 // Phase 1: Process possible allocations from alloc_worklist. Create instance
4243 // types for the CheckCastPP for allocations where possible.
4244 // Propagate the new types through users as follows:
4245 // casts and Phi: push users on alloc_worklist
4246 // AddP: cast Base and Address inputs to the instance type
4247 // push any AddP users on alloc_worklist and push any memnode
4248 // users onto memnode_worklist.
4249 // Phase 2: Process MemNode's from memnode_worklist. Compute a new address type and
4250 // search the Memory chain for a store with the appropriate
4251 // address type. If a Phi is found, create a new version with
4252 // the appropriate memory slices from each of the Phi inputs.
4253 // For stores, process the users as follows:
4254 // MemNode: push on memnode_worklist
4255 // MergeMem: push on mergemem_worklist
4256 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice,
4257 // moving the first node encountered of each instance type to
4258 // the input corresponding to its alias index, i.e. the
4259 // appropriate memory slice.
4260 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4261 //
4262 // In the following example, the CheckCastPP nodes are the casts of allocation
4263 // results and the allocation of node 29 is non-escaping and eligible to be an
4264 // instance type.
4265 //
4266 // We start with:
4267 //
4268 // 7 Parm #memory
4269 // 10 ConI "12"
4270 // 19 CheckCastPP "Foo"
4271 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4272 // 29 CheckCastPP "Foo"
4273 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
4274 //
4275 // 40 StoreP 25 7 20 ... alias_index=4
4276 // 50 StoreP 35 40 30 ... alias_index=4
4277 // 60 StoreP 45 50 20 ... alias_index=4
4278 // 70 LoadP _ 60 30 ... alias_index=4
4279 // 80 Phi 75 50 60 Memory alias_index=4
4280 // 90 LoadP _ 80 30 ... alias_index=4
4281 // 100 LoadP _ 80 20 ... alias_index=4
4282 //
4283 //
4284 // Phase 1 creates an instance type for node 29, assigning it an instance id of 24
4285 // and creating a new alias index for node 30. This gives:
4286 //
4287 // 7 Parm #memory
4288 // 10 ConI "12"
4289 // 19 CheckCastPP "Foo"
4290 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4291 // 29 CheckCastPP "Foo" iid=24
4292 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4293 //
4294 // 40 StoreP 25 7 20 ... alias_index=4
4295 // 50 StoreP 35 40 30 ... alias_index=6
4296 // 60 StoreP 45 50 20 ... alias_index=4
4297 // 70 LoadP _ 60 30 ... alias_index=6
4298 // 80 Phi 75 50 60 Memory alias_index=4
4299 // 90 LoadP _ 80 30 ... alias_index=6
4300 // 100 LoadP _ 80 20 ... alias_index=4
4301 //
4302 // In phase 2, new memory inputs are computed for the loads and stores,
4303 // and a new version of the phi is created. In phase 4, the inputs to
4304 // node 80 are updated and then the memory nodes are updated with the
4305 // values computed in phase 2. This results in:
4306 //
4307 // 7 Parm #memory
4308 // 10 ConI "12"
4309 // 19 CheckCastPP "Foo"
4310 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4311 // 29 CheckCastPP "Foo" iid=24
4312 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4313 //
4314 // 40 StoreP 25 7 20 ... alias_index=4
4315 // 50 StoreP 35 7 30 ... alias_index=6
4316 // 60 StoreP 45 40 20 ... alias_index=4
4317 // 70 LoadP _ 50 30 ... alias_index=6
4318 // 80 Phi 75 40 60 Memory alias_index=4
4319 // 120 Phi 75 50 50 Memory alias_index=6
4320 // 90 LoadP _ 120 30 ... alias_index=6
4321 // 100 LoadP _ 80 20 ... alias_index=4
4322 //
4323 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
4324 GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4325 GrowableArray<MergeMemNode*> &mergemem_worklist,
4326 Unique_Node_List &reducible_merges) {
4327 DEBUG_ONLY(Unique_Node_List reduced_merges;)
4328 GrowableArray<Node *> memnode_worklist;
4329 GrowableArray<PhiNode *> orig_phis;
4330 PhaseIterGVN *igvn = _igvn;
4331 uint new_index_start = (uint) _compile->num_alias_types();
4332 VectorSet visited;
4333 ideal_nodes.clear(); // Reset for use with set_map/get_map.
4334 uint unique_old = _compile->unique();
4335
4336 // Phase 1: Process possible allocations from alloc_worklist.
4337 // Create instance types for the CheckCastPP for allocations where possible.
4338 //
4339 // (Note: don't forget to change the order of the second AddP node on
4340 // the alloc_worklist if the order of the worklist processing is changed,
4341 // see the comment in find_second_addp().)
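// Editorial note: at the source level the worked example above corresponds to
// something like the following sketch (names are made up):
//
//   Foo shared = escapingFoo();   // node 19: stays on the generic Foo+12 slice
//   Foo local  = new Foo();       // node 29: non-escaping, gets instance id 24
//   shared.f = a;                 // node 40
//   local.f  = b;                 // node 50: moved to new alias index 6
//   use(local.f);                 // node 70: after the split depends on 50 only
//
// The disjoint alias indices are what later let loads bypass unrelated stores
// and allow the allocation itself to be scalar replaced.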
4342 //
4343 while (alloc_worklist.length() != 0) {
4344 Node *n = alloc_worklist.pop();
4345 uint ni = n->_idx;
4346 if (n->is_Call()) {
4347 CallNode *alloc = n->as_Call();
4348 // copy escape information to call node
4349 PointsToNode* ptn = ptnode_adr(alloc->_idx);
4350 PointsToNode::EscapeState es = ptn->escape_state();
4351 // We have an allocation or call which returns a Java object,
4352 // see if it is non-escaped.
4353 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4354 continue;
4355 }
4356 // Find the CheckCastPP for the allocate or for the return value of a call
4357 n = alloc->result_cast();
4358 if (n == nullptr) { // No uses except Initialize node
4359 if (alloc->is_Allocate()) {
4360 // Set the scalar_replaceable flag for allocation
4361 // so it could be eliminated if it has no uses.
4362 alloc->as_Allocate()->_is_scalar_replaceable = true;
4363 }
4364 continue;
4365 }
4366 if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
4367 // We could reach here in the allocate case if one init is associated with many allocs.
4368 if (alloc->is_Allocate()) {
4369 alloc->as_Allocate()->_is_scalar_replaceable = false;
4370 }
4371 continue;
4372 }
4373
4374 // The inline code for Object.clone() casts the allocation result to
4375 // java.lang.Object and then to the actual type of the allocated
4376 // object. Detect this case and use the second cast.
4377 // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
4378 // the allocation result is cast to java.lang.Object and then
4379 // to the actual Array type.
4380 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4381 && (alloc->is_AllocateArray() ||
4382 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4383 Node *cast2 = nullptr;
4384 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4385 Node *use = n->fast_out(i);
4386 if (use->is_CheckCastPP()) {
4387 cast2 = use;
4388 break;
4389 }
4390 }
4391 if (cast2 != nullptr) {
4392 n = cast2;
4393 } else {
4394 // Non-scalar replaceable if the allocation type is unknown statically
4395 // (reflection allocation): the object can't be restored during
4396 // deoptimization without a precise type.
4397 continue;
4398 }
4399 }
4400
4401 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4402 if (t == nullptr) {
4403 continue; // not a TypeOopPtr
4404 }
4405 if (!t->klass_is_exact()) {
4406 continue; // not a unique type
4407 }
4408 if (alloc->is_Allocate()) {
4409 // Set the scalar_replaceable flag for allocation
4410 // so it could be eliminated.
4411 alloc->as_Allocate()->_is_scalar_replaceable = true;
4412 }
4413 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4414 // In order for an object to be scalar-replaceable, it must be:
4415 // - a direct allocation (not a call returning an object)
4416 // - non-escaping
4417 // - eligible to be a unique type
4418 // - not determined to be ineligible by escape analysis
4419 set_map(alloc, n);
4420 set_map(n, alloc);
4421 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4422 igvn->hash_delete(n);
4423 igvn->set_type(n, tinst);
4424 n->raise_bottom_type(tinst);
4425 igvn->hash_insert(n);
4426 record_for_optimizer(n);
4427 // Allocate an alias index for the header fields. Accesses to
4428 // the header emitted during macro expansion wouldn't have
4429 // correct memory state otherwise.
4430 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4431 _compile->get_alias_index(tinst->add_offset(Type::klass_offset()));
4432 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4433
4434 // First, put on the worklist all Field edges from the Connection Graph,
4435 // which is more accurate than putting immediate users from the Ideal Graph.
4436 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4437 PointsToNode* tgt = e.get();
4438 if (tgt->is_Arraycopy()) {
4439 continue;
4440 }
4441 Node* use = tgt->ideal_node();
4442 assert(tgt->is_Field() && use->is_AddP(),
4443 "only AddP nodes are Field edges in CG");
4444 if (use->outcnt() > 0) { // Don't process dead nodes
4445 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4446 if (addp2 != nullptr) {
4447 assert(alloc->is_AllocateArray(),"array allocation was expected");
4448 alloc_worklist.append_if_missing(addp2);
4449 }
4450 alloc_worklist.append_if_missing(use);
4451 }
4452 }
4453
4454 // An allocation may have an Initialize which has raw stores. Scan
4455 // the users of the raw allocation result and push AddP users
4456 // on alloc_worklist.
4457 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4458 assert (raw_result != nullptr, "must have an allocation result");
4459 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4460 Node *use = raw_result->fast_out(i);
4461 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4462 Node* addp2 = find_second_addp(use, raw_result);
4463 if (addp2 != nullptr) {
4464 assert(alloc->is_AllocateArray(),"array allocation was expected");
4465 alloc_worklist.append_if_missing(addp2);
4466 }
4467 alloc_worklist.append_if_missing(use);
4468 } else if (use->is_MemBar()) {
4469 memnode_worklist.append_if_missing(use);
4470 }
4471 }
4472 }
4473 } else if (n->is_AddP()) {
4474 if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4475 // This AddP will go away when we reduce the Phi.
4476 continue;
4477 }
4478 Node* addp_base = get_addp_base(n);
4479 JavaObjectNode* jobj = unique_java_object(addp_base);
4480 if (jobj == nullptr || jobj == phantom_obj) {
4481 #ifdef ASSERT
4482 ptnode_adr(get_addp_base(n)->_idx)->dump();
4483 ptnode_adr(n->_idx)->dump();
4484 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4485 #endif
4486 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4487 return;
4488 }
4489 Node *base = get_map(jobj->idx()); // CheckCastPP node
4490 if (!split_AddP(n, base)) continue; // wrong type from dead path
4491 } else if (n->is_Phi() ||
4492 n->is_CheckCastPP() ||
4493 n->is_EncodeP() ||
4494 n->is_DecodeN() ||
4495 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4496 if (visited.test_set(n->_idx)) {
4497 assert(n->is_Phi(), "loops only through Phi's");
4498 continue; // already processed
4499 }
4500 // Reducible Phi's will be removed from the graph after split_unique_types
4501 // finishes. For now we just try to split out the SR inputs of the merge.
4502 Node* parent = n->in(1);
4503 if (reducible_merges.member(n)) {
4504 reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
4505 #ifdef ASSERT
4506 if (VerifyReduceAllocationMerges) {
4507 reduced_merges.push(n);
4508 }
4509 #endif
4510 continue;
4511 } else if (reducible_merges.member(parent)) {
4512 // 'n' is a user of a reducible merge (a Phi). It will be simplified as
4513 // part of reduce_merge.
4514 continue; 4515 } 4516 JavaObjectNode* jobj = unique_java_object(n); 4517 if (jobj == nullptr || jobj == phantom_obj) { 4518 #ifdef ASSERT 4519 ptnode_adr(n->_idx)->dump(); 4520 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); 4521 #endif 4522 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4523 return; 4524 } else { 4525 Node *val = get_map(jobj->idx()); // CheckCastPP node 4526 TypeNode *tn = n->as_Type(); 4527 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 4528 assert(tinst != nullptr && tinst->is_known_instance() && 4529 tinst->instance_id() == jobj->idx() , "instance type expected."); 4530 4531 const Type *tn_type = igvn->type(tn); 4532 const TypeOopPtr *tn_t; 4533 if (tn_type->isa_narrowoop()) { 4534 tn_t = tn_type->make_ptr()->isa_oopptr(); 4535 } else { 4536 tn_t = tn_type->isa_oopptr(); 4537 } 4538 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 4539 if (tn_type->isa_narrowoop()) { 4540 tn_type = tinst->make_narrowoop(); 4541 } else { 4542 tn_type = tinst; 4543 } 4544 igvn->hash_delete(tn); 4545 igvn->set_type(tn, tn_type); 4546 tn->set_type(tn_type); 4547 igvn->hash_insert(tn); 4548 record_for_optimizer(n); 4549 } else { 4550 assert(tn_type == TypePtr::NULL_PTR || 4551 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)), 4552 "unexpected type"); 4553 continue; // Skip dead path with different type 4554 } 4555 } 4556 } else { 4557 debug_only(n->dump();) 4558 assert(false, "EA: unexpected node"); 4559 continue; 4560 } 4561 // push allocation's users on appropriate worklist 4562 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4563 Node *use = n->fast_out(i); 4564 if(use->is_Mem() && use->in(MemNode::Address) == n) { 4565 // Load/store to instance's field 4566 memnode_worklist.append_if_missing(use); 4567 } else if (use->is_MemBar()) { 4568 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 4569 memnode_worklist.append_if_missing(use); 4570 } 4571 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 4572 Node* addp2 = find_second_addp(use, n); 4573 if (addp2 != nullptr) { 4574 alloc_worklist.append_if_missing(addp2); 4575 } 4576 alloc_worklist.append_if_missing(use); 4577 } else if (use->is_Phi() || 4578 use->is_CheckCastPP() || 4579 use->is_EncodeNarrowPtr() || 4580 use->is_DecodeNarrowPtr() || 4581 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 4582 alloc_worklist.append_if_missing(use); 4583 #ifdef ASSERT 4584 } else if (use->is_Mem()) { 4585 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 4586 } else if (use->is_MergeMem()) { 4587 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 4588 } else if (use->is_SafePoint()) { 4589 // Look for MergeMem nodes for calls which reference unique allocation 4590 // (through CheckCastPP nodes) even for debug info. 
4591 Node* m = use->in(TypeFunc::Memory);
4592 if (m->is_MergeMem()) {
4593 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4594 }
4595 } else if (use->Opcode() == Op_EncodeISOArray) {
4596 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4597 // EncodeISOArray overwrites destination array
4598 memnode_worklist.append_if_missing(use);
4599 }
4600 } else {
4601 uint op = use->Opcode();
4602 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4603 (use->in(MemNode::Memory) == n)) {
4604 // These nodes overwrite the memory edge corresponding to the destination array.
4605 memnode_worklist.append_if_missing(use);
4606 } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4607 op == Op_CastP2X ||
4608 op == Op_FastLock || op == Op_AryEq ||
4609 op == Op_StrComp || op == Op_CountPositives ||
4610 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4611 op == Op_StrEquals || op == Op_VectorizedHashCode ||
4612 op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4613 op == Op_SubTypeCheck ||
4614 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4615 n->dump();
4616 use->dump();
4617 assert(false, "EA: missing allocation reference path");
4618 }
4619 #endif
4620 }
4621 }
4622
4623 }
4624
4625 #ifdef ASSERT
4626 if (VerifyReduceAllocationMerges) {
4627 for (uint i = 0; i < reducible_merges.size(); i++) {
4628 Node* phi = reducible_merges.at(i);
4629
4630 if (!reduced_merges.member(phi)) {
4631 phi->dump(2);
4632 phi->dump(-2);
4633 assert(false, "This reducible merge wasn't reduced.");
4634 }
4635
4636 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
4637 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
4638 Node* use = phi->fast_out(j);
4639 if (!use->is_SafePoint() && !use->is_CastPP()) {
4640 phi->dump(2);
4641 phi->dump(-2);
4642 assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
4643 }
4644 }
4645 }
4646 }
4647 #endif
4648
4649 // Go over all ArrayCopy nodes and, if one of the inputs has a unique
4650 // type, record it in the ArrayCopy node so we know what memory this
4651 // node uses/modifies.
4652 for (int next = 0; next < arraycopy_worklist.length(); next++) {
4653 ArrayCopyNode* ac = arraycopy_worklist.at(next);
4654 Node* dest = ac->in(ArrayCopyNode::Dest);
4655 if (dest->is_AddP()) {
4656 dest = get_addp_base(dest);
4657 }
4658 JavaObjectNode* jobj = unique_java_object(dest);
4659 if (jobj != nullptr) {
4660 Node *base = get_map(jobj->idx());
4661 if (base != nullptr) {
4662 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4663 ac->_dest_type = base_t;
4664 }
4665 }
4666 Node* src = ac->in(ArrayCopyNode::Src);
4667 if (src->is_AddP()) {
4668 src = get_addp_base(src);
4669 }
4670 jobj = unique_java_object(src);
4671 if (jobj != nullptr) {
4672 Node* base = get_map(jobj->idx());
4673 if (base != nullptr) {
4674 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4675 ac->_src_type = base_t;
4676 }
4677 }
4678 }
4679
4680 // New alias types were created in split_AddP().
4681 uint new_index_end = (uint) _compile->num_alias_types();
4682
4683 // Phase 2: Process MemNodes from memnode_worklist. Compute new address types and
4684 // new values for the Memory inputs (the Memory inputs are not
4685 // actually updated until Phase 4).
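// For example (shape only, control inputs omitted): a load from a field of
// unique instance #5, such as
//
//   LoadI( MergeMem(...wide memory...), AddP(CheckCastPP #5, offset) )
//
// gets, via find_inst_mem(), the memory node belonging to instance #5's
// alias index, so that after Phase 4 it roughly becomes
//
//   LoadI( StoreI(...#5 slice...), AddP(CheckCastPP #5, offset) )
//
// and no longer depends on stores to other objects.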
4686 if (memnode_worklist.length() == 0)
4687 return; // nothing to do
4688 while (memnode_worklist.length() != 0) {
4689 Node *n = memnode_worklist.pop();
4690 if (visited.test_set(n->_idx)) {
4691 continue;
4692 }
4693 if (n->is_Phi() || n->is_ClearArray()) {
4694 // we don't need to do anything, but the users must be pushed
4695 } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4696 // we don't need to do anything, but the users must be pushed
4697 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4698 if (n == nullptr) {
4699 continue;
4700 }
4701 } else if (n->is_CallLeaf()) {
4702 // Runtime calls with narrow memory input (no MergeMem node):
4703 // get the memory projection
4704 n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4705 if (n == nullptr) {
4706 continue;
4707 }
4708 } else if (n->Opcode() == Op_StrCompressedCopy ||
4709 n->Opcode() == Op_EncodeISOArray) {
4710 // get the memory projection
4711 n = n->find_out_with(Op_SCMemProj);
4712 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4713 } else {
4714 assert(n->is_Mem(), "memory node required.");
4715 Node *addr = n->in(MemNode::Address);
4716 const Type *addr_t = igvn->type(addr);
4717 if (addr_t == Type::TOP) {
4718 continue;
4719 }
4720 assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
4721 int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4722 assert((uint)alias_idx < new_index_end, "wrong alias index");
4723 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4724 if (_compile->failing()) {
4725 return;
4726 }
4727 if (mem != n->in(MemNode::Memory)) {
4728 // We delay the memory edge update since we need the old one in
4729 // the MergeMem code below when instance memory slices are separated.
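// set_map() only records the n -> mem association here; Phase 4 reads it
// back with get_map() and rewires n's Memory input then, so Phase 3 still
// sees the original memory graph while splitting MergeMem slices.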
4730 set_map(n, mem);
4731 }
4732 if (n->is_Load()) {
4733 continue; // don't push users
4734 } else if (n->is_LoadStore()) {
4735 // get the memory projection
4736 n = n->find_out_with(Op_SCMemProj);
4737 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4738 }
4739 }
4740 // push the user on the appropriate worklist
4741 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4742 Node *use = n->fast_out(i);
4743 if (use->is_Phi() || use->is_ClearArray()) {
4744 memnode_worklist.append_if_missing(use);
4745 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4746 memnode_worklist.append_if_missing(use);
4747 } else if (use->is_MemBar() || use->is_CallLeaf()) {
4748 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4749 memnode_worklist.append_if_missing(use);
4750 }
4751 #ifdef ASSERT
4752 } else if (use->is_Mem()) {
4753 assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4754 } else if (use->is_MergeMem()) {
4755 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4756 } else if (use->Opcode() == Op_EncodeISOArray) {
4757 if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4758 // EncodeISOArray overwrites destination array
4759 memnode_worklist.append_if_missing(use);
4760 }
4761 } else {
4762 uint op = use->Opcode();
4763 if ((use->in(MemNode::Memory) == n) &&
4764 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4765 // These nodes overwrite the memory edge corresponding to the destination array.
4766 memnode_worklist.append_if_missing(use);
4767 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4768 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4769 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4770 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4771 n->dump();
4772 use->dump();
4773 assert(false, "EA: missing memory path");
4774 }
4775 #endif
4776 }
4777 }
4778 }
4779
4780 // Phase 3: Process MergeMem nodes from mergemem_worklist.
4781 // Walk each memory slice, moving the first node encountered of each
4782 // instance type to the input corresponding to its alias index.
4783 uint length = mergemem_worklist.length();
4784 for (uint next = 0; next < length; ++next) {
4785 MergeMemNode* nmm = mergemem_worklist.at(next);
4786 assert(!visited.test_set(nmm->_idx), "should not be visited before");
4787 // Note: we don't want to use MergeMemStream here because we only want to
4788 // scan inputs which exist at the start, not ones we add during processing.
4789 // Note 2: MergeMem may already contain instance memory slices added
4790 // during the find_inst_mem() call when memory nodes were processed above.
4791 igvn->hash_delete(nmm);
4792 uint nslices = MIN2(nmm->req(), new_index_start);
4793 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4794 Node* mem = nmm->in(i);
4795 Node* cur = nullptr;
4796 if (mem == nullptr || mem->is_top()) {
4797 continue;
4798 }
4799 // First, update the mergemem by moving memory nodes to their corresponding slices
4800 // if their type became more precise since this mergemem was created.
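// Walk up the memory chain hanging off slice 'i'. A node whose address
// type now maps to a different alias index is moved to that slice (if it
// is still empty); 'cur' remembers the first node that really belongs to
// slice 'i' and becomes the new head of that slice below.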
4801 while (mem->is_Mem()) {
4802 const Type *at = igvn->type(mem->in(MemNode::Address));
4803 if (at != Type::TOP) {
4804 assert(at->isa_ptr() != nullptr, "pointer type required.");
4805 uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4806 if (idx == i) {
4807 if (cur == nullptr) {
4808 cur = mem;
4809 }
4810 } else {
4811 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4812 nmm->set_memory_at(idx, mem);
4813 }
4814 }
4815 }
4816 mem = mem->in(MemNode::Memory);
4817 }
4818 nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4819 // Find any instance of the current type if we haven't already
4820 // encountered a memory slice of that instance along the memory chain.
4821 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4822 if ((uint)_compile->get_general_index(ni) == i) {
4823 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4824 if (nmm->is_empty_memory(m)) {
4825 Node* result = find_inst_mem(mem, ni, orig_phis);
4826 if (_compile->failing()) {
4827 return;
4828 }
4829 nmm->set_memory_at(ni, result);
4830 }
4831 }
4832 }
4833 }
4834 // Find the memory values for the rest of the instances.
4835 for (uint ni = new_index_start; ni < new_index_end; ni++) {
4836 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4837 Node* result = step_through_mergemem(nmm, ni, tinst);
4838 if (result == nmm->base_memory()) {
4839 // Didn't find instance memory, search through general slice recursively.
4840 result = nmm->memory_at(_compile->get_general_index(ni));
4841 result = find_inst_mem(result, ni, orig_phis);
4842 if (_compile->failing()) {
4843 return;
4844 }
4845 nmm->set_memory_at(ni, result);
4846 }
4847 }
4848
4849 // If we have crossed the 3/4 point of the max node limit, it's too risky
4850 // to continue with EA/SR because we might hit the max node limit.
4851 if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4852 if (_compile->do_reduce_allocation_merges()) {
4853 _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4854 } else if (_invocation > 0) {
4855 _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4856 } else {
4857 _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4858 }
4859 return;
4860 }
4861
4862 igvn->hash_insert(nmm);
4863 record_for_optimizer(nmm);
4864 }
4865
4866 // Phase 4: Update the inputs of non-instance memory Phis and
4867 // the Memory input of memnodes.
4868 // First update the inputs of any non-instance Phis from
4869 // which we split out an instance Phi. Note we don't have
4870 // to recursively process Phis encountered on the input memory
4871 // chains as is done in split_memory_phi() since they will
4872 // also be processed here.
4873 for (int j = 0; j < orig_phis.length(); j++) {
4874 PhiNode *phi = orig_phis.at(j);
4875 int alias_idx = _compile->get_alias_index(phi->adr_type());
4876 igvn->hash_delete(phi);
4877 for (uint i = 1; i < phi->req(); i++) {
4878 Node *mem = phi->in(i);
4879 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4880 if (_compile->failing()) {
4881 return;
4882 }
4883 if (mem != new_mem) {
4884 phi->set_req(i, new_mem);
4885 }
4886 }
4887 igvn->hash_insert(phi);
4888 record_for_optimizer(phi);
4889 }
4890
4891 // Update the memory inputs of MemNodes with the value we computed
4892 // in Phase 2, and move stores' memory users to the corresponding memory slices.
4893 // Disable memory split verification code until the fix for 6984348.
4894 // Currently it produces false negative results since it does not cover all cases. 4895 #if 0 // ifdef ASSERT 4896 visited.Reset(); 4897 Node_Stack old_mems(arena, _compile->unique() >> 2); 4898 #endif 4899 for (uint i = 0; i < ideal_nodes.size(); i++) { 4900 Node* n = ideal_nodes.at(i); 4901 Node* nmem = get_map(n->_idx); 4902 assert(nmem != nullptr, "sanity"); 4903 if (n->is_Mem()) { 4904 #if 0 // ifdef ASSERT 4905 Node* old_mem = n->in(MemNode::Memory); 4906 if (!visited.test_set(old_mem->_idx)) { 4907 old_mems.push(old_mem, old_mem->outcnt()); 4908 } 4909 #endif 4910 assert(n->in(MemNode::Memory) != nmem, "sanity"); 4911 if (!n->is_Load()) { 4912 // Move memory users of a store first. 4913 move_inst_mem(n, orig_phis); 4914 } 4915 // Now update memory input 4916 igvn->hash_delete(n); 4917 n->set_req(MemNode::Memory, nmem); 4918 igvn->hash_insert(n); 4919 record_for_optimizer(n); 4920 } else { 4921 assert(n->is_Allocate() || n->is_CheckCastPP() || 4922 n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); 4923 } 4924 } 4925 #if 0 // ifdef ASSERT 4926 // Verify that memory was split correctly 4927 while (old_mems.is_nonempty()) { 4928 Node* old_mem = old_mems.node(); 4929 uint old_cnt = old_mems.index(); 4930 old_mems.pop(); 4931 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 4932 } 4933 #endif 4934 } 4935 4936 #ifndef PRODUCT 4937 int ConnectionGraph::_no_escape_counter = 0; 4938 int ConnectionGraph::_arg_escape_counter = 0; 4939 int ConnectionGraph::_global_escape_counter = 0; 4940 4941 static const char *node_type_names[] = { 4942 "UnknownType", 4943 "JavaObject", 4944 "LocalVar", 4945 "Field", 4946 "Arraycopy" 4947 }; 4948 4949 static const char *esc_names[] = { 4950 "UnknownEscape", 4951 "NoEscape", 4952 "ArgEscape", 4953 "GlobalEscape" 4954 }; 4955 4956 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 4957 NodeType nt = node_type(); 4958 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 4959 if (print_state) { 4960 EscapeState es = escape_state(); 4961 EscapeState fields_es = fields_escape_state(); 4962 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 4963 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 4964 out->print("NSR "); 4965 } 4966 } 4967 } 4968 4969 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 4970 dump_header(print_state, out); 4971 if (is_Field()) { 4972 FieldNode* f = (FieldNode*)this; 4973 if (f->is_oop()) { 4974 out->print("oop "); 4975 } 4976 if (f->offset() > 0) { 4977 out->print("+%d ", f->offset()); 4978 } 4979 out->print("("); 4980 for (BaseIterator i(f); i.has_next(); i.next()) { 4981 PointsToNode* b = i.get(); 4982 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 4983 } 4984 out->print(" )"); 4985 } 4986 out->print("["); 4987 for (EdgeIterator i(this); i.has_next(); i.next()) { 4988 PointsToNode* e = i.get(); 4989 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 4990 } 4991 out->print(" ["); 4992 for (UseIterator i(this); i.has_next(); i.next()) { 4993 PointsToNode* u = i.get(); 4994 bool is_base = false; 4995 if (PointsToNode::is_base_use(u)) { 4996 is_base = true; 4997 u = PointsToNode::get_use_node(u)->as_Field(); 4998 } 4999 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 5000 } 5001 out->print(" ]] "); 5002 if (_node == nullptr) { 5003 out->print("<null>%s", newline ? 
"\n" : ""); 5004 } else { 5005 _node->dump(newline ? "\n" : "", false, out); 5006 } 5007 } 5008 5009 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 5010 bool first = true; 5011 int ptnodes_length = ptnodes_worklist.length(); 5012 for (int i = 0; i < ptnodes_length; i++) { 5013 PointsToNode *ptn = ptnodes_worklist.at(i); 5014 if (ptn == nullptr || !ptn->is_JavaObject()) { 5015 continue; 5016 } 5017 PointsToNode::EscapeState es = ptn->escape_state(); 5018 if ((es != PointsToNode::NoEscape) && !Verbose) { 5019 continue; 5020 } 5021 Node* n = ptn->ideal_node(); 5022 if (n->is_Allocate() || (n->is_CallStaticJava() && 5023 n->as_CallStaticJava()->is_boxing_method())) { 5024 if (first) { 5025 tty->cr(); 5026 tty->print("======== Connection graph for "); 5027 _compile->method()->print_short_name(); 5028 tty->cr(); 5029 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 5030 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 5031 tty->cr(); 5032 first = false; 5033 } 5034 ptn->dump(); 5035 // Print all locals and fields which reference this allocation 5036 for (UseIterator j(ptn); j.has_next(); j.next()) { 5037 PointsToNode* use = j.get(); 5038 if (use->is_LocalVar()) { 5039 use->dump(Verbose); 5040 } else if (Verbose) { 5041 use->dump(); 5042 } 5043 } 5044 tty->cr(); 5045 } 5046 } 5047 } 5048 5049 void ConnectionGraph::print_statistics() { 5050 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 5051 } 5052 5053 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 5054 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 5055 return; 5056 } 5057 for (int next = 0; next < java_objects_worklist.length(); ++next) { 5058 JavaObjectNode* ptn = java_objects_worklist.at(next); 5059 if (ptn->ideal_node()->is_Allocate()) { 5060 if (ptn->escape_state() == PointsToNode::NoEscape) { 5061 Atomic::inc(&ConnectionGraph::_no_escape_counter); 5062 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 5063 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 5064 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 5065 Atomic::inc(&ConnectionGraph::_global_escape_counter); 5066 } else { 5067 assert(false, "Unexpected Escape State"); 5068 } 5069 } 5070 } 5071 } 5072 5073 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 5074 if (_compile->directive()->TraceEscapeAnalysisOption) { 5075 assert(ptn != nullptr, "should not be null"); 5076 assert(reason != nullptr, "should not be null"); 5077 ptn->dump_header(true); 5078 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 5079 PointsToNode::EscapeState new_fields_es = fields ? 
es : ptn->fields_escape_state(); 5080 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 5081 } 5082 } 5083 5084 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 5085 if (_compile->directive()->TraceEscapeAnalysisOption) { 5086 stringStream ss; 5087 ss.print("propagated from: "); 5088 from->dump(true, &ss, false); 5089 return ss.as_string(); 5090 } else { 5091 return nullptr; 5092 } 5093 } 5094 5095 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 5096 if (_compile->directive()->TraceEscapeAnalysisOption) { 5097 stringStream ss; 5098 ss.print("escapes as arg to:"); 5099 call->dump("", false, &ss); 5100 return ss.as_string(); 5101 } else { 5102 return nullptr; 5103 } 5104 } 5105 5106 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 5107 if (_compile->directive()->TraceEscapeAnalysisOption) { 5108 stringStream ss; 5109 ss.print("is merged with other object: "); 5110 other->dump_header(true, &ss); 5111 return ss.as_string(); 5112 } else { 5113 return nullptr; 5114 } 5115 } 5116 5117 #endif 5118 5119 void ConnectionGraph::record_for_optimizer(Node *n) { 5120 _igvn->_worklist.push(n); 5121 _igvn->add_users_to_worklist(n); 5122 }
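// Typical use of record_for_optimizer() in this file (see Phase 4 above):
// after retyping or rewiring a node outside IGVN, re-hash it and queue it
// together with its users so IGVN revisits them, e.g.
//
//   igvn->hash_delete(n);
//   n->set_req(MemNode::Memory, nmem);
//   igvn->hash_insert(n);
//   record_for_optimizer(n);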