/*
 * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/locknode.hpp"
#include "opto/macro.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp(Phase::_t_escapeAnalysis);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(congraph);)
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(nullptr);)
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }

  C->print_method(PHASE_AFTER_EA, 2);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp(Phase::_t_connectionGraph);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // Load/Store at mark work address is at offset 0 so has no AddP which confuses EA
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and Java static call results are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        // If MemBarStoreStore has a precedent edge add it to the worklist (like MemBarRelease)
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is non
        // escaping, we can record a unique type
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  _compile->print_method(PHASE_EA_AFTER_INITIAL_CONGRAPH, 4);

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  _compile->print_method(PHASE_EA_AFTER_COMPLETE_CONGRAPH, 4);

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
    _compile->print_method(PHASE_EA_ADJUST_SCALAR_REPLACEABLE_ITER, 6, n);
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last and that's what we
  // want because by then the scalarizable inputs of the merge will already have
  // a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  _compile->print_method(PHASE_EA_AFTER_PROPAGATE_NSR, 4);
  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  _compile->print_method(PHASE_EA_AFTER_GRAPH_OPTIMIZATION, 4);

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Expand flat accesses if the object does not escape. This adds nodes to
  // the graph, so it has to be after split_unique_types. This expands atomic
  // mismatched accesses (though encapsulated in LoadFlats and StoreFlats) into
  // non-mismatched accesses, so it is better before reduce allocation merges.
  if (has_non_escaping_obj) {
    optimize_flat_accesses(sfn_worklist);
  }

  _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);

  // 7. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObject
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocates
  // that they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  _compile->print_method(PHASE_EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS, 4);

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueConstantBool node between If and Bool nodes. But we could also have a sub class of IfNode,
          // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in some certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable therefore we always need a controlling if for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the null constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the later
// case because we have constraints on it and because the CastPP has a control
// input.
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against null.
//
// Before:
//
//          C1      C2  ... Cn
//           \      |      /
//            \     |     /
//             \    |    /
//              \   |   /
//               \  |  /
//                \ | /
//                 \|/
//       Region     B1      B2  ...  Bn
Bn 722 // | \ | / 723 // | \ | / 724 // | \ | / 725 // | \ | / 726 // | \ | / 727 // | \ | / 728 // ---------------> Phi 729 // | 730 // X | 731 // | | 732 // | | 733 // ------> CastPP 734 // 735 // After (only partial illustration; base = B2, current_control = C2): 736 // 737 // C2 738 // | 739 // If 740 // / \ 741 // / \ 742 // T F 743 // /\ / 744 // / \ / 745 // / \ / 746 // C1 CastPP Reg Cn 747 // | | | 748 // | | | 749 // | | | 750 // -------------- | ---------- 751 // | | | 752 // Region 753 // 754 Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) { 755 Node* control_successor = current_control->unique_ctrl_out(); 756 Node* cmp = _igvn->transform(specialize_cmp(base, castpp->in(0))); 757 Node* bol = _igvn->transform(new BoolNode(cmp, BoolTest::ne)); 758 IfNode* if_ne = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If(); 759 Node* not_eq_control = _igvn->transform(new IfTrueNode(if_ne)); 760 Node* yes_eq_control = _igvn->transform(new IfFalseNode(if_ne)); 761 Node* end_region = _igvn->transform(new RegionNode(3)); 762 763 // Insert the new if-else-region block into the graph 764 end_region->set_req(1, not_eq_control); 765 end_region->set_req(2, yes_eq_control); 766 control_successor->replace_edge(current_control, end_region, _igvn); 767 768 _igvn->_worklist.push(current_control); 769 _igvn->_worklist.push(control_successor); 770 771 return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::DependencyType::NonFloatingNonNarrowing, nullptr)); 772 } 773 774 Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) { 775 const Type* load_type = _igvn->type(curr_load); 776 Node* nsr_value = _igvn->zerocon(load_type->basic_type()); 777 Node* memory = curr_load->in(MemNode::Memory); 778 779 // The data_phi merging the loads needs to be nullable if 780 // we are loading pointers. 781 if (load_type->make_ptr() != nullptr) { 782 if (load_type->isa_narrowoop()) { 783 load_type = load_type->meet(TypeNarrowOop::NULL_PTR); 784 } else if (load_type->isa_ptr()) { 785 load_type = load_type->meet(TypePtr::NULL_PTR); 786 } else { 787 assert(false, "Unexpected load ptr type."); 788 } 789 } 790 791 Node* data_phi = PhiNode::make(region, nsr_value, load_type); 792 793 for (int i = 1; i < bases_for_loads->length(); i++) { 794 Node* base = bases_for_loads->at(i); 795 Node* cmp_region = nullptr; 796 if (base != nullptr) { 797 if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node 798 cmp_region = base->unique_ctrl_out_or_null(); 799 assert(cmp_region != nullptr, "There should be."); 800 base = base->find_out_with(Op_CastPP); 801 } 802 803 Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset))); 804 Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? 
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP fields loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from an CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the null constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the null constant) then we don't
// need (and shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region     Allocate   Null    Call
//            |             \      |      /
//            |              \     |     /
//            |               \    |    /
//            |                \   |   /
//            |                 \  |  /
//            |                  \ | /
//            ------------------> Phi            # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                         Call  Null
//                            \  /
//                            CmpP
//                             |
//                           Bool#NE
//                             |
//                             If
//                            / \
//                           T   F
//                          / \ /
//                         /   R
//                     CastPP  |
//                        |    |
//                      AddP   |
//                        |    |
//                      Load   |
//                         \   |   0
//                Allocate  \  |  /
//                    \      \ | /
//                    AddP    Phi
//                      \      /
//                      Load  /
//                         \ 0 /
//                          \ | /
//                           \|/
//                           Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node*>  &alloc_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting the
  // CastPP->Loads through ophi. Three kinds of values may be stored in this
  // array, depending on the nullability status of the corresponding input in
  // ophi.
  //
  //  - nullptr:    Meaning that the base is actually the null constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not Null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already null
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi` method will add an
        // 'If-Then-Else-Region` around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method split a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                  CmpP/N
//
// After:
//
//        in1  Other   in2 Other  inN  Other
//         |    |       |   |      |    |
//         \    |       |   |      |    |
//          \  /        |   /      |   /
//          CmpP/N      CmpP/N     CmpP/N
//          Bool        Bool       Bool
//            \          |          /
//             \         |         /
//              \        |        /
//               \       |       /
//                \      |      /
//                 \     |     /
//                  \    |    /
//                   \   |   /
//                      Phi
//                       |   Zero
//                       |    /
//                       |   /
//                       |  /
//                     CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  Node* one = _igvn->intcon(1);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = PhiNode::make(ophi->in(0), zero, TypeInt::INT);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
          (mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
        res_phi_input = one;
      } else {
        res_phi_input = zero;
      }
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  // This CMP always compares whether the output of "res_phi" is TRUE as far as the "mask".
  Node* new_cmp = _igvn->transform(new CmpINode(_igvn->transform(res_phi), (mask == BoolTest::mask::eq) ? one : zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *>  &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return ;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);

      // If new_load is a Load but not from an AddP, it means that the load is folded into another
      // load. And since this load is not from a field, we cannot create a unique type for it.
      // For example:
      //
      //   if (b) {
      //     Holder h1 = new Holder();
      //     Object o = ...;
      //     h.o = o.getClass();
      //   } else {
      //     Holder h2 = ...;
      //   }
      //   Holder h = Phi(h1, h2);
      //   Object r = h.o;
      //
      // Then, splitting r through the merge point results in:
      //
      //   if (b) {
      //     Holder h1 = new Holder();
      //     Object o = ...;
      //     h.o = o.getClass();
      //     Object o1 = h.o;
      //   } else {
      //     Holder h2 = ...;
      //     Object o2 = h2.o;
      //   }
      //   Object r = Phi(o1, o2);
      //
      // In this case, o1 is folded to o.getClass() which is a Load but not from an AddP, but from
      // an OopHandle that is loaded from the Klass of o.
      if (!new_addp->is_AddP()) {
        continue;
      }
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *>  &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//    -> a '-1' constant, the i'th input of the original Phi is NSR.
//    -> a 'x' constant >=0, the i'th input of the original Phi will be SR and
//       the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector  = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, I collect all safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It then will iterate on the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check detailed description in SafePointScalarMergeNode class header.
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent =  cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::DependencyType::FloatingNarrowing, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms      = sfpt->jvms();
    uint merge_idx      = (sfpt->req() - jvms->scloff());
    int debug_start     = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      Unique_Node_List value_worklist;
#ifdef ASSERT
      const Type* res_type = alloc->result_cast()->bottom_type();
      if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
        PhiNode* phi = ophi->as_Phi();
        assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
      }
#endif
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);

      // Scalarize inline types that were added to the safepoint.
      // Don't allow linking a constant oop (if available) for flat array elements
      // because Deoptimization::reassign_flat_array_elements needs field values.
      const bool allow_oop = !merge_t->is_flat();
      for (uint j = 0; j < value_worklist.size(); ++j) {
        InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
        vt->make_scalar_in_safepoints(_igvn, allow_oop);
      }
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below makes
    // sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
    _igvn->_worklist.push(sfpt);
  }

  return true;
}

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node*>  &alloc_worklist) {
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  _igvn->hash_delete(ophi);

  // Copying all users first because some will be removed and others won't.
  // Ophi also may acquire some new users as part of Cast reduction.
  // CastPPs also need to be processed before CmpPs.
  Unique_Node_List castpps;
  Unique_Node_List others;
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_CastPP()) {
      castpps.push(use);
    } else if (use->is_AddP() || use->is_Cmp()) {
      others.push(use);
    } else {
      // Safepoints to be processed later; other users aren't expected here
      assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
    }
  }

  _compile->print_method(PHASE_EA_BEFORE_PHI_REDUCTION, 5, ophi);

  // CastPPs need to be processed before Cmps because during the process of
  // splitting CastPPs we make reference to the inputs of the Cmp that is used
  // by the If controlling the CastPP.
  for (uint i = 0; i < castpps.size(); i++) {
    reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
    _compile->print_method(PHASE_EA_AFTER_PHI_CASTPP_REDUCTION, 6, castpps.at(i));
  }

  for (uint i = 0; i < others.size(); i++) {
    Node* use = others.at(i);

    if (use->is_AddP()) {
      reduce_phi_on_field_access(use, alloc_worklist);
      _compile->print_method(PHASE_EA_AFTER_PHI_ADDP_REDUCTION, 6, use);
    } else if (use->is_Cmp()) {
      reduce_phi_on_cmp(use);
      _compile->print_method(PHASE_EA_AFTER_PHI_CMP_REDUCTION, 6, use);
    }
  }

  _igvn->set_delay_transform(delay);
}

void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
  Node* null_ptr            = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  const Type* new_t         = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi             = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  for (int i = ophi->outcnt()-1; i >= 0;) {
    Node* out = ophi->raw_out(i);

    if (out->is_ConstraintCast()) {
      const Type* out_t = _igvn->type(out)->make_ptr();
      const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
      bool change = out_new_t != out_t;

      for (int j = out->outcnt()-1; change && j >= 0; --j) {
        Node* out2 = out->raw_out(j);
        if (!out2->is_SafePoint()) {
          change = false;
          break;
        }
      }

      if (change) {
        Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::DependencyType::NonFloatingNarrowing, nullptr);
        _igvn->replace_node(out, new_cast);
        _igvn->register_new_node_with_optimizer(new_cast);
      }
    }

    --i;
    i = MIN2(i, (int)ophi->outcnt()-1);
  }

  _igvn->replace_node(ophi, new_phi);
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  if (!C->do_reduce_allocation_merges()) return;

  Unique_Node_List ideal_nodes;
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
C->record_failure(C2Compiler::retry_no_reduce_allocation_merges()); 1493 } 1494 } else { 1495 assert(false, "Only safepoints can use SafePointScalarMerge nodes."); 1496 C->record_failure(C2Compiler::retry_no_reduce_allocation_merges()); 1497 } 1498 } 1499 } 1500 1501 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1502 Node* m = n->fast_out(i); 1503 ideal_nodes.push(m); 1504 } 1505 } 1506 } 1507 1508 // Returns true if there is an object in the scope of sfn that does not escape globally. 1509 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { 1510 Compile* C = _compile; 1511 for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) { 1512 if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() || 1513 DeoptimizeObjectsALot) { 1514 // Jvmti agents can access locals. Must provide info about local objects at runtime. 1515 int num_locs = jvms->loc_size(); 1516 for (int idx = 0; idx < num_locs; idx++) { 1517 Node* l = sfn->local(jvms, idx); 1518 if (not_global_escape(l)) { 1519 return true; 1520 } 1521 } 1522 } 1523 if (C->env()->jvmti_can_get_owned_monitor_info() || 1524 C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) { 1525 // Jvmti agents can read monitors. Must provide info about locked objects at runtime. 1526 int num_mon = jvms->nof_monitors(); 1527 for (int idx = 0; idx < num_mon; idx++) { 1528 Node* m = sfn->monitor_obj(jvms, idx); 1529 if (m != nullptr && not_global_escape(m)) { 1530 return true; 1531 } 1532 } 1533 } 1534 } 1535 return false; 1536 } 1537 1538 // Returns true if at least one of the arguments to the call is an object 1539 // that does not escape globally. 1540 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) { 1541 if (call->method() != nullptr) { 1542 uint max_idx = TypeFunc::Parms + call->method()->arg_size(); 1543 for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) { 1544 Node* p = call->in(idx); 1545 if (not_global_escape(p)) { 1546 return true; 1547 } 1548 } 1549 } else { 1550 const char* name = call->as_CallStaticJava()->_name; 1551 assert(name != nullptr, "no name"); 1552 // no arg escapes through uncommon traps 1553 if (strcmp(name, "uncommon_trap") != 0) { 1554 // process_call_arguments() assumes that all arguments escape globally 1555 const TypeTuple* d = call->tf()->domain_sig(); 1556 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1557 const Type* at = d->field_at(i); 1558 if (at->isa_oopptr() != nullptr) { 1559 return true; 1560 } 1561 } 1562 } 1563 } 1564 return false; 1565 } 1566 1567 1568 1569 // Utility function for nodes that load an object 1570 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 1571 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1572 // ThreadLocal has RawPtr type. 1573 const Type* t = _igvn->type(n); 1574 if (t->make_ptr() != nullptr) { 1575 Node* adr = n->in(MemNode::Address); 1576 #ifdef ASSERT 1577 if (!adr->is_AddP()) { 1578 assert(_igvn->type(adr)->isa_rawptr(), "sanity"); 1579 } else { 1580 assert((ptnode_adr(adr->_idx) == nullptr || 1581 ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); 1582 } 1583 #endif 1584 add_local_var_and_edge(n, PointsToNode::NoEscape, 1585 adr, delayed_worklist); 1586 } 1587 } 1588 1589 // Populate Connection Graph with PointsTo nodes and create simple 1590 // connection graph edges. 
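// For illustration (a rough sketch of the two-pass construction below): during
// add_node_to_connection_graph() a node such as an oop-typed Phi
//   p = Phi(region, a, b)
// only gets a LocalVar PointsTo node and is pushed on 'delayed_worklist',
// because PointsTo nodes for 'a' and 'b' may not exist yet. The missing edges
// (p -> a, p -> b) are added later by add_final_edges().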
1591 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) { 1592 assert(!_verify, "this method should not be called for verification"); 1593 PhaseGVN* igvn = _igvn; 1594 uint n_idx = n->_idx; 1595 PointsToNode* n_ptn = ptnode_adr(n_idx); 1596 if (n_ptn != nullptr) { 1597 return; // No need to redefine PointsTo node during first iteration. 1598 } 1599 int opcode = n->Opcode(); 1600 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode); 1601 if (gc_handled) { 1602 return; // Ignore node if already handled by GC. 1603 } 1604 1605 if (n->is_Call()) { 1606 // Arguments to allocation and locking don't escape. 1607 if (n->is_AbstractLock()) { 1608 // Put Lock and Unlock nodes on IGVN worklist to process them during 1609 // first IGVN optimization when escape information is still available. 1610 record_for_optimizer(n); 1611 } else if (n->is_Allocate()) { 1612 add_call_node(n->as_Call()); 1613 record_for_optimizer(n); 1614 } else { 1615 if (n->is_CallStaticJava()) { 1616 const char* name = n->as_CallStaticJava()->_name; 1617 if (name != nullptr && strcmp(name, "uncommon_trap") == 0) { 1618 return; // Skip uncommon traps 1619 } 1620 } 1621 // Don't mark as processed since call's arguments have to be processed. 1622 delayed_worklist->push(n); 1623 // Check if a call returns an object. 1624 if ((n->as_Call()->returns_pointer() && 1625 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) || 1626 (n->is_CallStaticJava() && 1627 n->as_CallStaticJava()->is_boxing_method())) { 1628 add_call_node(n->as_Call()); 1629 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) { 1630 bool returns_oop = false; 1631 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) { 1632 ProjNode* pn = n->fast_out(i)->as_Proj(); 1633 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) { 1634 returns_oop = true; 1635 } 1636 } 1637 if (returns_oop) { 1638 add_call_node(n->as_Call()); 1639 } 1640 } 1641 } 1642 return; 1643 } 1644 // Put this check here to process call arguments since some call nodes 1645 // point to phantom_obj. 1646 if (n_ptn == phantom_obj || n_ptn == null_obj) { 1647 return; // Skip predefined nodes. 1648 } 1649 switch (opcode) { 1650 case Op_AddP: { 1651 Node* base = get_addp_base(n); 1652 PointsToNode* ptn_base = ptnode_adr(base->_idx); 1653 // Field nodes are created for all field types. They are used in 1654 // adjust_scalar_replaceable_state() and split_unique_types(). 1655 // Note, non-oop fields will have only base edges in Connection 1656 // Graph because such fields are not used for oop loads and stores. 1657 int offset = address_offset(n, igvn); 1658 add_field(n, PointsToNode::NoEscape, offset); 1659 if (ptn_base == nullptr) { 1660 delayed_worklist->push(n); // Process it later. 1661 } else { 1662 n_ptn = ptnode_adr(n_idx); 1663 add_base(n_ptn->as_Field(), ptn_base); 1664 } 1665 break; 1666 } 1667 case Op_CastX2P: 1668 case Op_CastI2N: { 1669 map_ideal_node(n, phantom_obj); 1670 break; 1671 } 1672 case Op_InlineType: 1673 case Op_CastPP: 1674 case Op_CheckCastPP: 1675 case Op_EncodeP: 1676 case Op_DecodeN: 1677 case Op_EncodePKlass: 1678 case Op_DecodeNKlass: { 1679 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist); 1680 break; 1681 } 1682 case Op_CMoveP: { 1683 add_local_var(n, PointsToNode::NoEscape); 1684 // Do not add edges during first iteration because some could be 1685 // not defined yet. 
1686 delayed_worklist->push(n); 1687 break; 1688 } 1689 case Op_ConP: 1690 case Op_ConN: 1691 case Op_ConNKlass: { 1692 // assume all oop constants globally escape except for null 1693 PointsToNode::EscapeState es; 1694 const Type* t = igvn->type(n); 1695 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) { 1696 es = PointsToNode::NoEscape; 1697 } else { 1698 es = PointsToNode::GlobalEscape; 1699 } 1700 PointsToNode* ptn_con = add_java_object(n, es); 1701 set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer")); 1702 break; 1703 } 1704 case Op_CreateEx: { 1705 // assume that all exception objects globally escape 1706 map_ideal_node(n, phantom_obj); 1707 break; 1708 } 1709 case Op_LoadKlass: 1710 case Op_LoadNKlass: { 1711 // Unknown class is loaded 1712 map_ideal_node(n, phantom_obj); 1713 break; 1714 } 1715 case Op_LoadP: 1716 case Op_LoadN: { 1717 add_objload_to_connection_graph(n, delayed_worklist); 1718 break; 1719 } 1720 case Op_Parm: { 1721 map_ideal_node(n, phantom_obj); 1722 break; 1723 } 1724 case Op_PartialSubtypeCheck: { 1725 // Produces Null or notNull and is used in only in CmpP so 1726 // phantom_obj could be used. 1727 map_ideal_node(n, phantom_obj); // Result is unknown 1728 break; 1729 } 1730 case Op_Phi: { 1731 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1732 // ThreadLocal has RawPtr type. 1733 const Type* t = n->as_Phi()->type(); 1734 if (t->make_ptr() != nullptr) { 1735 add_local_var(n, PointsToNode::NoEscape); 1736 // Do not add edges during first iteration because some could be 1737 // not defined yet. 1738 delayed_worklist->push(n); 1739 } 1740 break; 1741 } 1742 case Op_LoadFlat: 1743 // Treat LoadFlat similar to an unknown call that receives nothing and produces its results 1744 map_ideal_node(n, phantom_obj); 1745 break; 1746 case Op_StoreFlat: 1747 // Treat StoreFlat similar to a call that escapes the stored flattened fields 1748 delayed_worklist->push(n); 1749 break; 1750 case Op_Proj: { 1751 // we are only interested in the oop result projection from a call 1752 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() && 1753 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) { 1754 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) || 1755 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?"); 1756 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist); 1757 } else if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_LoadFlat() && igvn->type(n)->isa_ptr()) { 1758 // Treat LoadFlat outputs similar to a call return value 1759 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist); 1760 } 1761 break; 1762 } 1763 case Op_Rethrow: // Exception object escapes 1764 case Op_Return: { 1765 if (n->req() > TypeFunc::Parms && 1766 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) { 1767 // Treat Return value as LocalVar with GlobalEscape escape state. 
1768 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist); 1769 } 1770 break; 1771 } 1772 case Op_CompareAndExchangeP: 1773 case Op_CompareAndExchangeN: 1774 case Op_GetAndSetP: 1775 case Op_GetAndSetN: { 1776 add_objload_to_connection_graph(n, delayed_worklist); 1777 // fall-through 1778 } 1779 case Op_StoreP: 1780 case Op_StoreN: 1781 case Op_StoreNKlass: 1782 case Op_WeakCompareAndSwapP: 1783 case Op_WeakCompareAndSwapN: 1784 case Op_CompareAndSwapP: 1785 case Op_CompareAndSwapN: { 1786 add_to_congraph_unsafe_access(n, opcode, delayed_worklist); 1787 break; 1788 } 1789 case Op_AryEq: 1790 case Op_CountPositives: 1791 case Op_StrComp: 1792 case Op_StrEquals: 1793 case Op_StrIndexOf: 1794 case Op_StrIndexOfChar: 1795 case Op_StrInflatedCopy: 1796 case Op_StrCompressedCopy: 1797 case Op_VectorizedHashCode: 1798 case Op_EncodeISOArray: { 1799 add_local_var(n, PointsToNode::ArgEscape); 1800 delayed_worklist->push(n); // Process it later. 1801 break; 1802 } 1803 case Op_ThreadLocal: { 1804 PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape); 1805 set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer")); 1806 break; 1807 } 1808 case Op_Blackhole: { 1809 // All blackhole pointer arguments are globally escaping. 1810 // Only do this if there is at least one pointer argument. 1811 // Do not add edges during first iteration because some could be 1812 // not defined yet, defer to final step. 1813 for (uint i = 0; i < n->req(); i++) { 1814 Node* in = n->in(i); 1815 if (in != nullptr) { 1816 const Type* at = _igvn->type(in); 1817 if (!at->isa_ptr()) continue; 1818 1819 add_local_var(n, PointsToNode::GlobalEscape); 1820 delayed_worklist->push(n); 1821 break; 1822 } 1823 } 1824 break; 1825 } 1826 default: 1827 ; // Do nothing for nodes not related to EA. 1828 } 1829 return; 1830 } 1831 1832 // Add final simple edges to graph. 1833 void ConnectionGraph::add_final_edges(Node *n) { 1834 PointsToNode* n_ptn = ptnode_adr(n->_idx); 1835 #ifdef ASSERT 1836 if (_verify && n_ptn->is_JavaObject()) 1837 return; // This method does not change graph for JavaObject. 1838 #endif 1839 1840 if (n->is_Call()) { 1841 process_call_arguments(n->as_Call()); 1842 return; 1843 } 1844 assert(n->is_Store() || n->is_LoadStore() || n->is_StoreFlat() || 1845 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)), 1846 "node should be registered already"); 1847 int opcode = n->Opcode(); 1848 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode); 1849 if (gc_handled) { 1850 return; // Ignore node if already handled by GC. 
1851 } 1852 switch (opcode) { 1853 case Op_AddP: { 1854 Node* base = get_addp_base(n); 1855 PointsToNode* ptn_base = ptnode_adr(base->_idx); 1856 assert(ptn_base != nullptr, "field's base should be registered"); 1857 add_base(n_ptn->as_Field(), ptn_base); 1858 break; 1859 } 1860 case Op_InlineType: 1861 case Op_CastPP: 1862 case Op_CheckCastPP: 1863 case Op_EncodeP: 1864 case Op_DecodeN: 1865 case Op_EncodePKlass: 1866 case Op_DecodeNKlass: { 1867 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr); 1868 break; 1869 } 1870 case Op_CMoveP: { 1871 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { 1872 Node* in = n->in(i); 1873 if (in == nullptr) { 1874 continue; // ignore null 1875 } 1876 Node* uncast_in = in->uncast(); 1877 if (uncast_in->is_top() || uncast_in == n) { 1878 continue; // ignore top or inputs which go back this node 1879 } 1880 PointsToNode* ptn = ptnode_adr(in->_idx); 1881 assert(ptn != nullptr, "node should be registered"); 1882 add_edge(n_ptn, ptn); 1883 } 1884 break; 1885 } 1886 case Op_LoadP: 1887 case Op_LoadN: { 1888 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1889 // ThreadLocal has RawPtr type. 1890 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); 1891 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); 1892 break; 1893 } 1894 case Op_Phi: { 1895 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because 1896 // ThreadLocal has RawPtr type. 1897 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type"); 1898 for (uint i = 1; i < n->req(); i++) { 1899 Node* in = n->in(i); 1900 if (in == nullptr) { 1901 continue; // ignore null 1902 } 1903 Node* uncast_in = in->uncast(); 1904 if (uncast_in->is_top() || uncast_in == n) { 1905 continue; // ignore top or inputs which go back this node 1906 } 1907 PointsToNode* ptn = ptnode_adr(in->_idx); 1908 assert(ptn != nullptr, "node should be registered"); 1909 add_edge(n_ptn, ptn); 1910 } 1911 break; 1912 } 1913 case Op_StoreFlat: { 1914 // StoreFlat globally escapes its stored flattened fields 1915 InlineTypeNode* value = n->as_StoreFlat()->value(); 1916 ciInlineKlass* vk = _igvn->type(value)->inline_klass(); 1917 for (int i = 0; i < vk->nof_nonstatic_fields(); i++) { 1918 ciField* field = vk->nonstatic_field_at(i); 1919 if (field->type()->is_primitive_type()) { 1920 continue; 1921 } 1922 1923 Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true); 1924 PointsToNode* field_value_ptn = ptnode_adr(field_value->_idx); 1925 set_escape_state(field_value_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "store into a flat field")); 1926 } 1927 break; 1928 } 1929 case Op_Proj: { 1930 if (n->in(0)->is_Call()) { 1931 // we are only interested in the oop result projection from a call 1932 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) || 1933 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?"); 1934 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr); 1935 } else if (n->in(0)->is_LoadFlat()) { 1936 // Treat LoadFlat outputs similar to a call return value 1937 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr); 1938 } 1939 break; 1940 } 1941 case Op_Rethrow: // Exception object escapes 1942 case Op_Return: { 1943 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(), 1944 "Unexpected node type"); 1945 // Treat Return value as LocalVar with 
GlobalEscape escape state. 1946 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr); 1947 break; 1948 } 1949 case Op_CompareAndExchangeP: 1950 case Op_CompareAndExchangeN: 1951 case Op_GetAndSetP: 1952 case Op_GetAndSetN:{ 1953 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); 1954 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); 1955 // fall-through 1956 } 1957 case Op_CompareAndSwapP: 1958 case Op_CompareAndSwapN: 1959 case Op_WeakCompareAndSwapP: 1960 case Op_WeakCompareAndSwapN: 1961 case Op_StoreP: 1962 case Op_StoreN: 1963 case Op_StoreNKlass:{ 1964 add_final_edges_unsafe_access(n, opcode); 1965 break; 1966 } 1967 case Op_VectorizedHashCode: 1968 case Op_AryEq: 1969 case Op_CountPositives: 1970 case Op_StrComp: 1971 case Op_StrEquals: 1972 case Op_StrIndexOf: 1973 case Op_StrIndexOfChar: 1974 case Op_StrInflatedCopy: 1975 case Op_StrCompressedCopy: 1976 case Op_EncodeISOArray: { 1977 // char[]/byte[] arrays passed to string intrinsic do not escape but 1978 // they are not scalar replaceable. Adjust escape state for them. 1979 // Start from in(2) edge since in(1) is memory edge. 1980 for (uint i = 2; i < n->req(); i++) { 1981 Node* adr = n->in(i); 1982 const Type* at = _igvn->type(adr); 1983 if (!adr->is_top() && at->isa_ptr()) { 1984 assert(at == Type::TOP || at == TypePtr::NULL_PTR || 1985 at->isa_ptr() != nullptr, "expecting a pointer"); 1986 if (adr->is_AddP()) { 1987 adr = get_addp_base(adr); 1988 } 1989 PointsToNode* ptn = ptnode_adr(adr->_idx); 1990 assert(ptn != nullptr, "node should be registered"); 1991 add_edge(n_ptn, ptn); 1992 } 1993 } 1994 break; 1995 } 1996 case Op_Blackhole: { 1997 // All blackhole pointer arguments are globally escaping. 1998 for (uint i = 0; i < n->req(); i++) { 1999 Node* in = n->in(i); 2000 if (in != nullptr) { 2001 const Type* at = _igvn->type(in); 2002 if (!at->isa_ptr()) continue; 2003 2004 if (in->is_AddP()) { 2005 in = get_addp_base(in); 2006 } 2007 2008 PointsToNode* ptn = ptnode_adr(in->_idx); 2009 assert(ptn != nullptr, "should be defined already"); 2010 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole")); 2011 add_edge(n_ptn, ptn); 2012 } 2013 } 2014 break; 2015 } 2016 default: { 2017 // This method should be called only for EA specific nodes which may 2018 // miss some edges when they were created. 2019 #ifdef ASSERT 2020 n->dump(1); 2021 #endif 2022 guarantee(false, "unknown node"); 2023 } 2024 } 2025 return; 2026 } 2027 2028 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) { 2029 Node* adr = n->in(MemNode::Address); 2030 const Type* adr_type = _igvn->type(adr); 2031 adr_type = adr_type->make_ptr(); 2032 if (adr_type == nullptr) { 2033 return; // skip dead nodes 2034 } 2035 if (adr_type->isa_oopptr() 2036 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 2037 && adr_type == TypeRawPtr::NOTNULL 2038 && is_captured_store_address(adr))) { 2039 delayed_worklist->push(n); // Process it later. 2040 #ifdef ASSERT 2041 assert (adr->is_AddP(), "expecting an AddP"); 2042 if (adr_type == TypeRawPtr::NOTNULL) { 2043 // Verify a raw address for a store captured by Initialize node. 2044 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2045 assert(offs != Type::OffsetBot, "offset must be a constant"); 2046 } 2047 #endif 2048 } else { 2049 // Ignore copy the displaced header to the BoxNode (OSR compilation). 
2050 if (adr->is_BoxLock()) { 2051 return; 2052 } 2053 // Stored value escapes in unsafe access. 2054 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 2055 delayed_worklist->push(n); // Process unsafe access later. 2056 return; 2057 } 2058 #ifdef ASSERT 2059 n->dump(1); 2060 assert(false, "not unsafe"); 2061 #endif 2062 } 2063 } 2064 2065 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { 2066 Node* adr = n->in(MemNode::Address); 2067 const Type *adr_type = _igvn->type(adr); 2068 adr_type = adr_type->make_ptr(); 2069 #ifdef ASSERT 2070 if (adr_type == nullptr) { 2071 n->dump(1); 2072 assert(adr_type != nullptr, "dead node should not be on list"); 2073 return true; 2074 } 2075 #endif 2076 2077 if (adr_type->isa_oopptr() 2078 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 2079 && adr_type == TypeRawPtr::NOTNULL 2080 && is_captured_store_address(adr))) { 2081 // Point Address to Value 2082 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 2083 assert(adr_ptn != nullptr && 2084 adr_ptn->as_Field()->is_oop(), "node should be registered"); 2085 Node* val = n->in(MemNode::ValueIn); 2086 PointsToNode* ptn = ptnode_adr(val->_idx); 2087 assert(ptn != nullptr, "node should be registered"); 2088 add_edge(adr_ptn, ptn); 2089 return true; 2090 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 2091 // Stored value escapes in unsafe access. 2092 Node* val = n->in(MemNode::ValueIn); 2093 PointsToNode* ptn = ptnode_adr(val->_idx); 2094 assert(ptn != nullptr, "node should be registered"); 2095 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address")); 2096 // Add edge to object for unsafe access with offset. 2097 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 2098 assert(adr_ptn != nullptr, "node should be registered"); 2099 if (adr_ptn->is_Field()) { 2100 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 2101 add_edge(adr_ptn, ptn); 2102 } 2103 return true; 2104 } 2105 #ifdef ASSERT 2106 n->dump(1); 2107 assert(false, "not unsafe"); 2108 #endif 2109 return false; 2110 } 2111 2112 // Iterate over the domains for the scalarized and non scalarized calling conventions: Only move to the next element 2113 // in the non scalarized calling convention once all elements of the scalarized calling convention for that parameter 2114 // have been iterated over. So (ignoring hidden arguments such as the null marker) iterating over: 2115 // value class MyValue { 2116 // int f1; 2117 // float f2; 2118 // } 2119 // void m(Object o, MyValue v, int i) 2120 // produces the pairs: 2121 // (Object, Object), (Myvalue, int), (MyValue, float), (int, int) 2122 class DomainIterator : public StackObj { 2123 private: 2124 const TypeTuple* _domain; 2125 const TypeTuple* _domain_cc; 2126 const GrowableArray<SigEntry>* _sig_cc; 2127 2128 uint _i_domain; 2129 uint _i_domain_cc; 2130 int _i_sig_cc; 2131 uint _depth; 2132 2133 void next_helper() { 2134 if (_sig_cc == nullptr) { 2135 return; 2136 } 2137 BasicType prev_bt = _i_sig_cc > 0 ? 
_sig_cc->at(_i_sig_cc-1)._bt : T_ILLEGAL; 2138 while (_i_sig_cc < _sig_cc->length()) { 2139 BasicType bt = _sig_cc->at(_i_sig_cc)._bt; 2140 assert(bt != T_VOID || _sig_cc->at(_i_sig_cc-1)._bt == prev_bt, ""); 2141 if (bt == T_METADATA) { 2142 _depth++; 2143 } else if (bt == T_VOID && (prev_bt != T_LONG && prev_bt != T_DOUBLE)) { 2144 _depth--; 2145 if (_depth == 0) { 2146 _i_domain++; 2147 } 2148 } else { 2149 return; 2150 } 2151 prev_bt = bt; 2152 _i_sig_cc++; 2153 } 2154 } 2155 2156 public: 2157 2158 DomainIterator(CallJavaNode* call) : 2159 _domain(call->tf()->domain_sig()), 2160 _domain_cc(call->tf()->domain_cc()), 2161 _sig_cc(call->method()->get_sig_cc()), 2162 _i_domain(TypeFunc::Parms), 2163 _i_domain_cc(TypeFunc::Parms), 2164 _i_sig_cc(0), 2165 _depth(0) { 2166 next_helper(); 2167 } 2168 2169 bool has_next() const { 2170 assert(_sig_cc == nullptr || (_i_sig_cc < _sig_cc->length()) == (_i_domain < _domain->cnt()), "should reach end in sync"); 2171 assert((_i_domain < _domain->cnt()) == (_i_domain_cc < _domain_cc->cnt()), "should reach end in sync"); 2172 return _i_domain < _domain->cnt(); 2173 } 2174 2175 void next() { 2176 assert(_depth != 0 || _domain->field_at(_i_domain) == _domain_cc->field_at(_i_domain_cc), "should produce same non scalarized elements"); 2177 _i_sig_cc++; 2178 if (_depth == 0) { 2179 _i_domain++; 2180 } 2181 _i_domain_cc++; 2182 next_helper(); 2183 } 2184 2185 uint i_domain() const { 2186 return _i_domain; 2187 } 2188 2189 uint i_domain_cc() const { 2190 return _i_domain_cc; 2191 } 2192 2193 const Type* current_domain() const { 2194 return _domain->field_at(_i_domain); 2195 } 2196 2197 const Type* current_domain_cc() const { 2198 return _domain_cc->field_at(_i_domain_cc); 2199 } 2200 }; 2201 2202 void ConnectionGraph::add_call_node(CallNode* call) { 2203 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer"); 2204 uint call_idx = call->_idx; 2205 if (call->is_Allocate()) { 2206 Node* k = call->in(AllocateNode::KlassNode); 2207 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 2208 assert(kt != nullptr, "TypeKlassPtr required."); 2209 PointsToNode::EscapeState es = PointsToNode::NoEscape; 2210 bool scalar_replaceable = true; 2211 NOT_PRODUCT(const char* nsr_reason = ""); 2212 if (call->is_AllocateArray()) { 2213 if (!kt->isa_aryklassptr()) { // StressReflectiveCode 2214 es = PointsToNode::GlobalEscape; 2215 } else { 2216 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 2217 if (length < 0) { 2218 // Not scalar replaceable if the length is not constant. 2219 scalar_replaceable = false; 2220 NOT_PRODUCT(nsr_reason = "has a non-constant length"); 2221 } else if (length > EliminateAllocationArraySizeLimit) { 2222 // Not scalar replaceable if the length is too big. 2223 scalar_replaceable = false; 2224 NOT_PRODUCT(nsr_reason = "has a length that is too big"); 2225 } 2226 } 2227 } else { // Allocate instance 2228 if (!kt->isa_instklassptr()) { // StressReflectiveCode 2229 es = PointsToNode::GlobalEscape; 2230 } else { 2231 const TypeInstKlassPtr* ikt = kt->is_instklassptr(); 2232 ciInstanceKlass* ik = ikt->klass_is_exact() ? 
ikt->exact_klass()->as_instance_klass() : ikt->instance_klass(); 2233 if (ik->is_subclass_of(_compile->env()->Thread_klass()) || 2234 ik->is_subclass_of(_compile->env()->Reference_klass()) || 2235 !ik->can_be_instantiated() || 2236 ik->has_finalizer()) { 2237 es = PointsToNode::GlobalEscape; 2238 } else { 2239 int nfields = ik->as_instance_klass()->nof_nonstatic_fields(); 2240 if (nfields > EliminateAllocationFieldsLimit) { 2241 // Not scalar replaceable if there are too many fields. 2242 scalar_replaceable = false; 2243 NOT_PRODUCT(nsr_reason = "has too many fields"); 2244 } 2245 } 2246 } 2247 } 2248 add_java_object(call, es); 2249 PointsToNode* ptn = ptnode_adr(call_idx); 2250 if (!scalar_replaceable && ptn->scalar_replaceable()) { 2251 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason)); 2252 } 2253 } else if (call->is_CallStaticJava()) { 2254 // Call nodes could be different types: 2255 // 2256 // 1. CallDynamicJavaNode (what happened during call is unknown): 2257 // 2258 // - mapped to GlobalEscape JavaObject node if oop is returned; 2259 // 2260 // - all oop arguments are escaping globally; 2261 // 2262 // 2. CallStaticJavaNode (execute bytecode analysis if possible): 2263 // 2264 // - the same as CallDynamicJavaNode if can't do bytecode analysis; 2265 // 2266 // - mapped to GlobalEscape JavaObject node if unknown oop is returned; 2267 // - mapped to NoEscape JavaObject node if non-escaping object allocated 2268 // during call is returned; 2269 // - mapped to ArgEscape LocalVar node pointed to object arguments 2270 // which are returned and does not escape during call; 2271 // 2272 // - oop arguments escaping status is defined by bytecode analysis; 2273 // 2274 // For a static call, we know exactly what method is being called. 2275 // Use bytecode estimator to record whether the call's return value escapes. 2276 ciMethod* meth = call->as_CallJava()->method(); 2277 if (meth == nullptr) { 2278 const char* name = call->as_CallStaticJava()->_name; 2279 assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub() || 2280 strncmp(name, "load_unknown_inline", 19) == 0 || 2281 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "TODO: add failed case check"); 2282 // Returns a newly allocated non-escaped object. 2283 add_java_object(call, PointsToNode::NoEscape); 2284 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray")); 2285 } else if (meth->is_boxing_method()) { 2286 // Returns boxing object 2287 PointsToNode::EscapeState es; 2288 vmIntrinsics::ID intr = meth->intrinsic_id(); 2289 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) { 2290 // It does not escape if object is always allocated. 2291 es = PointsToNode::NoEscape; 2292 } else { 2293 // It escapes globally if object could be loaded from cache. 2294 es = PointsToNode::GlobalEscape; 2295 } 2296 add_java_object(call, es); 2297 if (es == PointsToNode::GlobalEscape) { 2298 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache")); 2299 } 2300 } else { 2301 BCEscapeAnalyzer* call_analyzer = meth->get_bcea(); 2302 call_analyzer->copy_dependencies(_compile->dependencies()); 2303 if (call_analyzer->is_return_allocated()) { 2304 // Returns a newly allocated non-escaped object, simply 2305 // update dependency information. 2306 // Mark it as NoEscape so that objects referenced by 2307 // it's fields will be marked as NoEscape at least. 
2308 add_java_object(call, PointsToNode::NoEscape); 2309 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call")); 2310 } else { 2311 bool ret_arg = false; 2312 // Determine whether any arguments are returned. 2313 for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) { 2314 uint arg = di.i_domain() - TypeFunc::Parms; 2315 if (di.current_domain_cc()->isa_ptr() != nullptr && 2316 call_analyzer->is_arg_returned(arg) && 2317 !meth->is_scalarized_arg(arg)) { 2318 ret_arg = true; 2319 break; 2320 } 2321 } 2322 if (ret_arg) { 2323 add_local_var(call, PointsToNode::ArgEscape); 2324 } else { 2325 // Returns unknown object. 2326 map_ideal_node(call, phantom_obj); 2327 } 2328 } 2329 } 2330 } else { 2331 // An other type of call, assume the worst case: 2332 // returned value is unknown and globally escapes. 2333 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check"); 2334 map_ideal_node(call, phantom_obj); 2335 } 2336 } 2337 2338 void ConnectionGraph::process_call_arguments(CallNode *call) { 2339 bool is_arraycopy = false; 2340 switch (call->Opcode()) { 2341 #ifdef ASSERT 2342 case Op_Allocate: 2343 case Op_AllocateArray: 2344 case Op_Lock: 2345 case Op_Unlock: 2346 assert(false, "should be done already"); 2347 break; 2348 #endif 2349 case Op_ArrayCopy: 2350 case Op_CallLeafNoFP: 2351 // Most array copies are ArrayCopy nodes at this point but there 2352 // are still a few direct calls to the copy subroutines (See 2353 // PhaseStringOpts::copy_string()) 2354 is_arraycopy = (call->Opcode() == Op_ArrayCopy) || 2355 call->as_CallLeaf()->is_call_to_arraycopystub(); 2356 // fall through 2357 case Op_CallLeafVector: 2358 case Op_CallLeaf: { 2359 // Stub calls, objects do not escape but they are not scale replaceable. 2360 // Adjust escape state for outgoing arguments. 2361 const TypeTuple * d = call->tf()->domain_sig(); 2362 bool src_has_oops = false; 2363 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2364 const Type* at = d->field_at(i); 2365 Node *arg = call->in(i); 2366 if (arg == nullptr) { 2367 continue; 2368 } 2369 const Type *aat = _igvn->type(arg); 2370 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) { 2371 continue; 2372 } 2373 if (arg->is_AddP()) { 2374 // 2375 // The inline_native_clone() case when the arraycopy stub is called 2376 // after the allocation before Initialize and CheckCastPP nodes. 2377 // Or normal arraycopy for object arrays case. 2378 // 2379 // Set AddP's base (Allocate) as not scalar replaceable since 2380 // pointer to the base (with offset) is passed as argument. 
2381 // 2382 arg = get_addp_base(arg); 2383 } 2384 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 2385 assert(arg_ptn != nullptr, "should be registered"); 2386 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); 2387 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { 2388 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || 2389 aat->isa_ptr() != nullptr, "expecting an Ptr"); 2390 bool arg_has_oops = aat->isa_oopptr() && 2391 (aat->isa_instptr() || 2392 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) || 2393 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr && 2394 aat->isa_aryptr()->is_flat() && 2395 aat->isa_aryptr()->elem()->inline_klass()->contains_oops())); 2396 if (i == TypeFunc::Parms) { 2397 src_has_oops = arg_has_oops; 2398 } 2399 // 2400 // src or dst could be j.l.Object when other is basic type array: 2401 // 2402 // arraycopy(char[],0,Object*,0,size); 2403 // arraycopy(Object*,0,char[],0,size); 2404 // 2405 // Don't add edges in such cases. 2406 // 2407 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy && 2408 arg_has_oops && (i > TypeFunc::Parms); 2409 #ifdef ASSERT 2410 if (!(is_arraycopy || 2411 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || 2412 (call->as_CallLeaf()->_name != nullptr && 2413 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 2414 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || 2415 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || 2416 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 2417 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 2418 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || 2419 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || 2420 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 || 2421 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 || 2422 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 || 2423 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 || 2424 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 || 2425 strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 || 2426 strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 || 2427 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 || 2428 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 || 2429 strcmp(call->as_CallLeaf()->_name, "kyberNtt") == 0 || 2430 strcmp(call->as_CallLeaf()->_name, "kyberInverseNtt") == 0 || 2431 strcmp(call->as_CallLeaf()->_name, "kyberNttMult") == 0 || 2432 strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_2") == 0 || 2433 strcmp(call->as_CallLeaf()->_name, "kyberAddPoly_3") == 0 || 2434 strcmp(call->as_CallLeaf()->_name, "kyber12To16") == 0 || 2435 strcmp(call->as_CallLeaf()->_name, "kyberBarrettReduce") == 0 || 2436 strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostNtt") == 0 || 2437 strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostInverseNtt") == 0 || 2438 strcmp(call->as_CallLeaf()->_name, "dilithiumNttMult") == 0 || 2439 strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 || 2440 strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 || 2441 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 || 2442 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 || 2443 strcmp(call->as_CallLeaf()->_name, 
"md5_implCompress") == 0 || 2444 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 || 2445 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || 2446 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || 2447 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || 2448 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || 2449 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || 2450 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 || 2451 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 || 2452 strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 || 2453 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 || 2454 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 || 2455 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 || 2456 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 2457 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 2458 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 2459 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2460 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 || 2461 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 || 2462 strcmp(call->as_CallLeaf()->_name, "store_inline_type_fields_to_buf") == 0 || 2463 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 2464 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 2465 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2466 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 || 2467 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || 2468 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || 2469 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 || 2470 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0) 2471 ))) { 2472 call->dump(); 2473 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 2474 } 2475 #endif 2476 // Always process arraycopy's destination object since 2477 // we need to add all possible edges to references in 2478 // source object. 2479 if (arg_esc >= PointsToNode::ArgEscape && 2480 !arg_is_arraycopy_dest) { 2481 continue; 2482 } 2483 PointsToNode::EscapeState es = PointsToNode::ArgEscape; 2484 if (call->is_ArrayCopy()) { 2485 ArrayCopyNode* ac = call->as_ArrayCopy(); 2486 if (ac->is_clonebasic() || 2487 ac->is_arraycopy_validated() || 2488 ac->is_copyof_validated() || 2489 ac->is_copyofrange_validated()) { 2490 es = PointsToNode::NoEscape; 2491 } 2492 } 2493 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2494 if (arg_is_arraycopy_dest) { 2495 Node* src = call->in(TypeFunc::Parms); 2496 if (src->is_AddP()) { 2497 src = get_addp_base(src); 2498 } 2499 PointsToNode* src_ptn = ptnode_adr(src->_idx); 2500 assert(src_ptn != nullptr, "should be registered"); 2501 // Special arraycopy edge: 2502 // Only escape state of destination object's fields affects 2503 // escape state of fields in source object. 2504 add_arraycopy(call, es, src_ptn, arg_ptn); 2505 } 2506 } 2507 } 2508 break; 2509 } 2510 case Op_CallStaticJava: { 2511 // For a static call, we know exactly what method is being called. 
2512 // Use bytecode estimator to record the call's escape affects 2513 #ifdef ASSERT 2514 const char* name = call->as_CallStaticJava()->_name; 2515 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 2516 #endif 2517 ciMethod* meth = call->as_CallJava()->method(); 2518 if ((meth != nullptr) && meth->is_boxing_method()) { 2519 break; // Boxing methods do not modify any oops. 2520 } 2521 BCEscapeAnalyzer* call_analyzer = (meth !=nullptr) ? meth->get_bcea() : nullptr; 2522 // fall-through if not a Java method or no analyzer information 2523 if (call_analyzer != nullptr) { 2524 PointsToNode* call_ptn = ptnode_adr(call->_idx); 2525 for (DomainIterator di(call->as_CallJava()); di.has_next(); di.next()) { 2526 int k = di.i_domain() - TypeFunc::Parms; 2527 const Type* at = di.current_domain_cc(); 2528 Node* arg = call->in(di.i_domain_cc()); 2529 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 2530 if (at->isa_ptr() != nullptr && 2531 call_analyzer->is_arg_returned(k) && 2532 !meth->is_scalarized_arg(k)) { 2533 // The call returns arguments. 2534 if (call_ptn != nullptr) { // Is call's result used? 2535 assert(call_ptn->is_LocalVar(), "node should be registered"); 2536 assert(arg_ptn != nullptr, "node should be registered"); 2537 add_edge(call_ptn, arg_ptn); 2538 } 2539 } 2540 if (at->isa_oopptr() != nullptr && 2541 arg_ptn->escape_state() < PointsToNode::GlobalEscape) { 2542 if (!call_analyzer->is_arg_stack(k)) { 2543 // The argument global escapes 2544 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2545 } else { 2546 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2547 if (!call_analyzer->is_arg_local(k)) { 2548 // The argument itself doesn't escape, but any fields might 2549 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2550 } 2551 } 2552 } 2553 } 2554 if (call_ptn != nullptr && call_ptn->is_LocalVar()) { 2555 // The call returns arguments. 2556 assert(call_ptn->edge_count() > 0, "sanity"); 2557 if (!call_analyzer->is_return_local()) { 2558 // Returns also unknown object. 2559 add_edge(call_ptn, phantom_obj); 2560 } 2561 } 2562 break; 2563 } 2564 } 2565 default: { 2566 // Fall-through here if not a Java method or no analyzer information 2567 // or some other type of call, assume the worst case: all arguments 2568 // globally escape. 2569 const TypeTuple* d = call->tf()->domain_cc(); 2570 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2571 const Type* at = d->field_at(i); 2572 if (at->isa_oopptr() != nullptr) { 2573 Node* arg = call->in(i); 2574 if (arg->is_AddP()) { 2575 arg = get_addp_base(arg); 2576 } 2577 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already"); 2578 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2579 } 2580 } 2581 } 2582 } 2583 } 2584 2585 2586 // Finish Graph construction. 2587 bool ConnectionGraph::complete_connection_graph( 2588 GrowableArray<PointsToNode*>& ptnodes_worklist, 2589 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 2590 GrowableArray<JavaObjectNode*>& java_objects_worklist, 2591 GrowableArray<FieldNode*>& oop_fields_worklist) { 2592 // Normally only 1-3 passes needed to build Connection Graph depending 2593 // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. 
2594 // Set limit to 20 to catch the situation when something went wrong and
2595 // bail out of Escape Analysis.
2596 // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
2597 #define GRAPH_BUILD_ITER_LIMIT 20
2598
2599 // Propagate GlobalEscape and ArgEscape escape states and check that
2600 // we still have non-escaping objects. The method pushes on _worklist
2601 // Field nodes which reference phantom_object.
2602 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2603 return false; // Nothing to do.
2604 }
2605 // Now propagate references to all JavaObject nodes.
2606 int java_objects_length = java_objects_worklist.length();
2607 elapsedTimer build_time;
2608 build_time.start();
2609 elapsedTimer time;
2610 bool timeout = false;
2611 int new_edges = 1;
2612 int iterations = 0;
2613 do {
2614 while ((new_edges > 0) &&
2615 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2616 double start_time = time.seconds();
2617 time.start();
2618 new_edges = 0;
2619 // Propagate references to phantom_object for nodes pushed on _worklist
2620 // by find_non_escaped_objects() and find_field_value().
2621 new_edges += add_java_object_edges(phantom_obj, false);
2622 for (int next = 0; next < java_objects_length; ++next) {
2623 JavaObjectNode* ptn = java_objects_worklist.at(next);
2624 new_edges += add_java_object_edges(ptn, true);
2625
2626 #define SAMPLE_SIZE 4
2627 if ((next % SAMPLE_SIZE) == 0) {
2628 // Every 4 iterations calculate how much time it will take
2629 // to complete graph construction.
2630 time.stop();
2631 // Poll for requests from shutdown mechanism to quiesce compiler
2632 // because Connection graph construction may take a long time.
2633 CompileBroker::maybe_block();
2634 double stop_time = time.seconds();
2635 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2636 double time_until_end = time_per_iter * (double)(java_objects_length - next);
2637 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2638 timeout = true;
2639 break; // Timeout
2640 }
2641 start_time = stop_time;
2642 time.start();
2643 }
2644 #undef SAMPLE_SIZE
2645
2646 }
2647 if (timeout) break;
2648 if (new_edges > 0) {
2649 // Update escape states on each iteration if graph was updated.
2650 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2651 return false; // Nothing to do.
2652 }
2653 }
2654 time.stop();
2655 if (time.seconds() >= EscapeAnalysisTimeout) {
2656 timeout = true;
2657 break;
2658 }
2659 _compile->print_method(PHASE_EA_COMPLETE_CONNECTION_GRAPH_ITER, 5);
2660 }
2661 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2662 time.start();
2663 // Find fields which have unknown value.
2664 int fields_length = oop_fields_worklist.length();
2665 for (int next = 0; next < fields_length; next++) {
2666 FieldNode* field = oop_fields_worklist.at(next);
2667 if (field->edge_count() == 0) {
2668 new_edges += find_field_value(field);
2669 // This code may add new edges to phantom_object.
2670 // Need another cycle to propagate references to phantom_object.
2671 }
2672 }
2673 time.stop();
2674 if (time.seconds() >= EscapeAnalysisTimeout) {
2675 timeout = true;
2676 break;
2677 }
2678 } else {
2679 new_edges = 0; // Bailout
2680 }
2681 } while (new_edges > 0);
2682
2683 build_time.stop();
2684 _build_time = build_time.seconds();
2685 _build_iterations = iterations;
2686
2687 // Bailout if passed limits.
2688 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) { 2689 Compile* C = _compile; 2690 if (C->log() != nullptr) { 2691 C->log()->begin_elem("connectionGraph_bailout reason='reached "); 2692 C->log()->text("%s", timeout ? "time" : "iterations"); 2693 C->log()->end_elem(" limit'"); 2694 } 2695 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d", 2696 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length()); 2697 // Possible infinite build_connection_graph loop, 2698 // bailout (no changes to ideal graph were made). 2699 return false; 2700 } 2701 2702 #undef GRAPH_BUILD_ITER_LIMIT 2703 2704 // Find fields initialized by null for non-escaping Allocations. 2705 int non_escaped_length = non_escaped_allocs_worklist.length(); 2706 for (int next = 0; next < non_escaped_length; next++) { 2707 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 2708 PointsToNode::EscapeState es = ptn->escape_state(); 2709 assert(es <= PointsToNode::ArgEscape, "sanity"); 2710 if (es == PointsToNode::NoEscape) { 2711 if (find_init_values_null(ptn, _igvn) > 0) { 2712 // Adding references to null object does not change escape states 2713 // since it does not escape. Also no fields are added to null object. 2714 add_java_object_edges(null_obj, false); 2715 } 2716 } 2717 Node* n = ptn->ideal_node(); 2718 if (n->is_Allocate()) { 2719 // The object allocated by this Allocate node will never be 2720 // seen by an other thread. Mark it so that when it is 2721 // expanded no MemBarStoreStore is added. 2722 InitializeNode* ini = n->as_Allocate()->initialization(); 2723 if (ini != nullptr) 2724 ini->set_does_not_escape(); 2725 } 2726 } 2727 return true; // Finished graph construction. 2728 } 2729 2730 // Propagate GlobalEscape and ArgEscape escape states to all nodes 2731 // and check that we still have non-escaping java objects. 2732 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 2733 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 2734 bool print_method) { 2735 GrowableArray<PointsToNode*> escape_worklist; 2736 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 2737 int ptnodes_length = ptnodes_worklist.length(); 2738 for (int next = 0; next < ptnodes_length; ++next) { 2739 PointsToNode* ptn = ptnodes_worklist.at(next); 2740 if (ptn->escape_state() >= PointsToNode::ArgEscape || 2741 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 2742 escape_worklist.push(ptn); 2743 } 2744 } 2745 // Set escape states to referenced nodes (edges list). 2746 while (escape_worklist.length() > 0) { 2747 PointsToNode* ptn = escape_worklist.pop(); 2748 PointsToNode::EscapeState es = ptn->escape_state(); 2749 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 2750 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 2751 es >= PointsToNode::ArgEscape) { 2752 // GlobalEscape or ArgEscape state of field means it has unknown value. 2753 if (add_edge(ptn, phantom_obj)) { 2754 // New edge was added 2755 add_field_uses_to_worklist(ptn->as_Field()); 2756 } 2757 } 2758 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2759 PointsToNode* e = i.get(); 2760 if (e->is_Arraycopy()) { 2761 assert(ptn->arraycopy_dst(), "sanity"); 2762 // Propagate only fields escape state through arraycopy edge. 
2763 if (e->fields_escape_state() < field_es) { 2764 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2765 escape_worklist.push(e); 2766 } 2767 } else if (es >= field_es) { 2768 // fields_escape_state is also set to 'es' if it is less than 'es'. 2769 if (e->escape_state() < es) { 2770 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2771 escape_worklist.push(e); 2772 } 2773 } else { 2774 // Propagate field escape state. 2775 bool es_changed = false; 2776 if (e->fields_escape_state() < field_es) { 2777 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2778 es_changed = true; 2779 } 2780 if ((e->escape_state() < field_es) && 2781 e->is_Field() && ptn->is_JavaObject() && 2782 e->as_Field()->is_oop()) { 2783 // Change escape state of referenced fields. 2784 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2785 es_changed = true; 2786 } else if (e->escape_state() < es) { 2787 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); 2788 es_changed = true; 2789 } 2790 if (es_changed) { 2791 escape_worklist.push(e); 2792 } 2793 } 2794 if (print_method) { 2795 _compile->print_method(PHASE_EA_CONNECTION_GRAPH_PROPAGATE_ITER, 6, e->ideal_node()); 2796 } 2797 } 2798 } 2799 // Remove escaped objects from non_escaped list. 2800 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) { 2801 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); 2802 if (ptn->escape_state() >= PointsToNode::GlobalEscape) { 2803 non_escaped_allocs_worklist.delete_at(next); 2804 } 2805 if (ptn->escape_state() == PointsToNode::NoEscape) { 2806 // Find fields in non-escaped allocations which have unknown value. 2807 find_init_values_phantom(ptn); 2808 } 2809 } 2810 return (non_escaped_allocs_worklist.length() > 0); 2811 } 2812 2813 // Add all references to JavaObject node by walking over all uses. 2814 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { 2815 int new_edges = 0; 2816 if (populate_worklist) { 2817 // Populate _worklist by uses of jobj's uses. 2818 for (UseIterator i(jobj); i.has_next(); i.next()) { 2819 PointsToNode* use = i.get(); 2820 if (use->is_Arraycopy()) { 2821 continue; 2822 } 2823 add_uses_to_worklist(use); 2824 if (use->is_Field() && use->as_Field()->is_oop()) { 2825 // Put on worklist all field's uses (loads) and 2826 // related field nodes (same base and offset). 2827 add_field_uses_to_worklist(use->as_Field()); 2828 } 2829 } 2830 } 2831 for (int l = 0; l < _worklist.length(); l++) { 2832 PointsToNode* use = _worklist.at(l); 2833 if (PointsToNode::is_base_use(use)) { 2834 // Add reference from jobj to field and from field to jobj (field's base). 2835 use = PointsToNode::get_use_node(use)->as_Field(); 2836 if (add_base(use->as_Field(), jobj)) { 2837 new_edges++; 2838 } 2839 continue; 2840 } 2841 assert(!use->is_JavaObject(), "sanity"); 2842 if (use->is_Arraycopy()) { 2843 if (jobj == null_obj) { // null object does not have field edges 2844 continue; 2845 } 2846 // Added edge from Arraycopy node to arraycopy's source java object 2847 if (add_edge(use, jobj)) { 2848 jobj->set_arraycopy_src(); 2849 new_edges++; 2850 } 2851 // and stop here. 2852 continue; 2853 } 2854 if (!add_edge(use, jobj)) { 2855 continue; // No new edge added, there was such edge already. 
2856 } 2857 new_edges++; 2858 if (use->is_LocalVar()) { 2859 add_uses_to_worklist(use); 2860 if (use->arraycopy_dst()) { 2861 for (EdgeIterator i(use); i.has_next(); i.next()) { 2862 PointsToNode* e = i.get(); 2863 if (e->is_Arraycopy()) { 2864 if (jobj == null_obj) { // null object does not have field edges 2865 continue; 2866 } 2867 // Add edge from arraycopy's destination java object to Arraycopy node. 2868 if (add_edge(jobj, e)) { 2869 new_edges++; 2870 jobj->set_arraycopy_dst(); 2871 } 2872 } 2873 } 2874 } 2875 } else { 2876 // Added new edge to stored in field values. 2877 // Put on worklist all field's uses (loads) and 2878 // related field nodes (same base and offset). 2879 add_field_uses_to_worklist(use->as_Field()); 2880 } 2881 } 2882 _worklist.clear(); 2883 _in_worklist.reset(); 2884 return new_edges; 2885 } 2886 2887 // Put on worklist all related field nodes. 2888 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { 2889 assert(field->is_oop(), "sanity"); 2890 int offset = field->offset(); 2891 add_uses_to_worklist(field); 2892 // Loop over all bases of this field and push on worklist Field nodes 2893 // with the same offset and base (since they may reference the same field). 2894 for (BaseIterator i(field); i.has_next(); i.next()) { 2895 PointsToNode* base = i.get(); 2896 add_fields_to_worklist(field, base); 2897 // Check if the base was source object of arraycopy and go over arraycopy's 2898 // destination objects since values stored to a field of source object are 2899 // accessible by uses (loads) of fields of destination objects. 2900 if (base->arraycopy_src()) { 2901 for (UseIterator j(base); j.has_next(); j.next()) { 2902 PointsToNode* arycp = j.get(); 2903 if (arycp->is_Arraycopy()) { 2904 for (UseIterator k(arycp); k.has_next(); k.next()) { 2905 PointsToNode* abase = k.get(); 2906 if (abase->arraycopy_dst() && abase != base) { 2907 // Look for the same arraycopy reference. 2908 add_fields_to_worklist(field, abase); 2909 } 2910 } 2911 } 2912 } 2913 } 2914 } 2915 } 2916 2917 // Put on worklist all related field nodes. 2918 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { 2919 int offset = field->offset(); 2920 if (base->is_LocalVar()) { 2921 for (UseIterator j(base); j.has_next(); j.next()) { 2922 PointsToNode* f = j.get(); 2923 if (PointsToNode::is_base_use(f)) { // Field 2924 f = PointsToNode::get_use_node(f); 2925 if (f == field || !f->as_Field()->is_oop()) { 2926 continue; 2927 } 2928 int offs = f->as_Field()->offset(); 2929 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 2930 add_to_worklist(f); 2931 } 2932 } 2933 } 2934 } else { 2935 assert(base->is_JavaObject(), "sanity"); 2936 if (// Skip phantom_object since it is only used to indicate that 2937 // this field's content globally escapes. 2938 (base != phantom_obj) && 2939 // null object node does not have fields. 2940 (base != null_obj)) { 2941 for (EdgeIterator i(base); i.has_next(); i.next()) { 2942 PointsToNode* f = i.get(); 2943 // Skip arraycopy edge since store to destination object field 2944 // does not update value in source object field. 
2945 if (f->is_Arraycopy()) { 2946 assert(base->arraycopy_dst(), "sanity"); 2947 continue; 2948 } 2949 if (f == field || !f->as_Field()->is_oop()) { 2950 continue; 2951 } 2952 int offs = f->as_Field()->offset(); 2953 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { 2954 add_to_worklist(f); 2955 } 2956 } 2957 } 2958 } 2959 } 2960 2961 // Find fields which have unknown value. 2962 int ConnectionGraph::find_field_value(FieldNode* field) { 2963 // Escaped fields should have init value already. 2964 assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); 2965 int new_edges = 0; 2966 for (BaseIterator i(field); i.has_next(); i.next()) { 2967 PointsToNode* base = i.get(); 2968 if (base->is_JavaObject()) { 2969 // Skip Allocate's fields which will be processed later. 2970 if (base->ideal_node()->is_Allocate()) { 2971 return 0; 2972 } 2973 assert(base == null_obj, "only null ptr base expected here"); 2974 } 2975 } 2976 if (add_edge(field, phantom_obj)) { 2977 // New edge was added 2978 new_edges++; 2979 add_field_uses_to_worklist(field); 2980 } 2981 return new_edges; 2982 } 2983 2984 // Find fields initializing values for allocations. 2985 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) { 2986 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 2987 PointsToNode* init_val = phantom_obj; 2988 Node* alloc = pta->ideal_node(); 2989 2990 // Do nothing for Allocate nodes since its fields values are 2991 // "known" unless they are initialized by arraycopy/clone. 2992 if (alloc->is_Allocate() && !pta->arraycopy_dst()) { 2993 if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) { 2994 // Null-free inline type arrays are initialized with an init value instead of null 2995 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx); 2996 assert(init_val != nullptr, "init value should be registered"); 2997 } else { 2998 return 0; 2999 } 3000 } 3001 // Non-escaped allocation returned from Java or runtime call has unknown values in fields. 3002 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity"); 3003 #ifdef ASSERT 3004 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) { 3005 const char* name = alloc->as_CallStaticJava()->_name; 3006 assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub() || 3007 strncmp(name, "load_unknown_inline", 19) == 0 || 3008 strncmp(name, "store_inline_type_fields_to_buf", 31) == 0, "sanity"); 3009 } 3010 #endif 3011 // Non-escaped allocation returned from Java or runtime call have unknown values in fields. 3012 int new_edges = 0; 3013 for (EdgeIterator i(pta); i.has_next(); i.next()) { 3014 PointsToNode* field = i.get(); 3015 if (field->is_Field() && field->as_Field()->is_oop()) { 3016 if (add_edge(field, init_val)) { 3017 // New edge was added 3018 new_edges++; 3019 add_field_uses_to_worklist(field->as_Field()); 3020 } 3021 } 3022 } 3023 return new_edges; 3024 } 3025 3026 // Find fields initializing values for allocations. 3027 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) { 3028 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); 3029 Node* alloc = pta->ideal_node(); 3030 // Do nothing for Call nodes since its fields values are unknown. 
3031 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
3032 return 0;
3033 }
3034 InitializeNode* ini = alloc->as_Allocate()->initialization();
3035 bool visited_bottom_offset = false;
3036 GrowableArray<int> offsets_worklist;
3037 int new_edges = 0;
3038
3039 // Check if an oop field's initializing value is recorded and add
3040 // a corresponding null value if it is not recorded.
3041 // Connection Graph does not record a default initialization by null
3042 // captured by Initialize node.
3043 //
3044 for (EdgeIterator i(pta); i.has_next(); i.next()) {
3045 PointsToNode* field = i.get(); // Field (AddP)
3046 if (!field->is_Field() || !field->as_Field()->is_oop()) {
3047 continue; // Not oop field
3048 }
3049 int offset = field->as_Field()->offset();
3050 if (offset == Type::OffsetBot) {
3051 if (!visited_bottom_offset) {
3052 // OffsetBot is used to reference an array's element:
3053 // always add a reference to null to all Field nodes since we don't
3054 // know which element is referenced.
3055 if (add_edge(field, null_obj)) {
3056 // New edge was added
3057 new_edges++;
3058 add_field_uses_to_worklist(field->as_Field());
3059 visited_bottom_offset = true;
3060 }
3061 }
3062 } else {
3063 // Check only oop fields.
3064 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
3065 if (adr_type->isa_rawptr()) {
3066 #ifdef ASSERT
3067 // Raw pointers are used for initializing stores, so skip this field
3068 // since its value should be recorded already.
3069 Node* base = get_addp_base(field->ideal_node());
3070 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
3071 #endif
3072 continue;
3073 }
3074 if (!offsets_worklist.contains(offset)) {
3075 offsets_worklist.append(offset);
3076 Node* value = nullptr;
3077 if (ini != nullptr) {
3078 // StoreP::value_basic_type() == T_ADDRESS
3079 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
3080 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
3081 // Make sure the initializing store has the same type as this AddP.
3082 // This AddP may reference a non-existing field because it is on a
3083 // dead branch of a bimorphic call which is not eliminated yet.
3084 if (store != nullptr && store->is_Store() &&
3085 store->as_Store()->value_basic_type() == ft) {
3086 value = store->in(MemNode::ValueIn);
3087 #ifdef ASSERT
3088 if (VerifyConnectionGraph) {
3089 // Verify that AddP already points to all objects the value points to.
3090 PointsToNode* val = ptnode_adr(value->_idx); 3091 assert((val != nullptr), "should be processed already"); 3092 PointsToNode* missed_obj = nullptr; 3093 if (val->is_JavaObject()) { 3094 if (!field->points_to(val->as_JavaObject())) { 3095 missed_obj = val; 3096 } 3097 } else { 3098 if (!val->is_LocalVar() || (val->edge_count() == 0)) { 3099 tty->print_cr("----------init store has invalid value -----"); 3100 store->dump(); 3101 val->dump(); 3102 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); 3103 } 3104 for (EdgeIterator j(val); j.has_next(); j.next()) { 3105 PointsToNode* obj = j.get(); 3106 if (obj->is_JavaObject()) { 3107 if (!field->points_to(obj->as_JavaObject())) { 3108 missed_obj = obj; 3109 break; 3110 } 3111 } 3112 } 3113 } 3114 if (missed_obj != nullptr) { 3115 tty->print_cr("----------field---------------------------------"); 3116 field->dump(); 3117 tty->print_cr("----------missed reference to object------------"); 3118 missed_obj->dump(); 3119 tty->print_cr("----------object referenced by init store-------"); 3120 store->dump(); 3121 val->dump(); 3122 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); 3123 } 3124 } 3125 #endif 3126 } else { 3127 // There could be initializing stores which follow allocation. 3128 // For example, a volatile field store is not collected 3129 // by Initialize node. 3130 // 3131 // Need to check for dependent loads to separate such stores from 3132 // stores which follow loads. For now, add initial value null so 3133 // that compare pointers optimization works correctly. 3134 } 3135 } 3136 if (value == nullptr) { 3137 // A field's initializing value was not recorded. Add null. 3138 if (add_edge(field, null_obj)) { 3139 // New edge was added 3140 new_edges++; 3141 add_field_uses_to_worklist(field->as_Field()); 3142 } 3143 } 3144 } 3145 } 3146 } 3147 return new_edges; 3148 } 3149 3150 // Adjust scalar_replaceable state after Connection Graph is built. 3151 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) { 3152 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)' 3153 // returns true. If one of the constraints in this method set 'jobj' to NSR 3154 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as 3155 // input, 'adjust_scalar_replaceable_state' will eventually be called with 3156 // that other object and the Phi will become a reducible Phi. 3157 // There could be multiple merges involving the same jobj. 3158 Unique_Node_List candidates; 3159 3160 // Search for non-escaping objects which are not scalar replaceable 3161 // and mark them to propagate the state to referenced objects. 3162 3163 for (UseIterator i(jobj); i.has_next(); i.next()) { 3164 PointsToNode* use = i.get(); 3165 if (use->is_Arraycopy()) { 3166 continue; 3167 } 3168 if (use->is_Field()) { 3169 FieldNode* field = use->as_Field(); 3170 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 3171 // 1. An object is not scalar replaceable if the field into which it is 3172 // stored has unknown offset (stored into unknown element of an array). 3173 if (field->offset() == Type::OffsetBot) { 3174 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset")); 3175 return; 3176 } 3177 for (BaseIterator i(field); i.has_next(); i.next()) { 3178 PointsToNode* base = i.get(); 3179 // 2. 
An object is not scalar replaceable if the field into which it is 3180 // stored has multiple bases one of which is null. 3181 if ((base == null_obj) && (field->base_count() > 1)) { 3182 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base")); 3183 return; 3184 } 3185 // 2.5. An object is not scalar replaceable if the field into which it is 3186 // stored has NSR base. 3187 if (!base->scalar_replaceable()) { 3188 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 3189 return; 3190 } 3191 } 3192 } 3193 assert(use->is_Field() || use->is_LocalVar(), "sanity"); 3194 // 3. An object is not scalar replaceable if it is merged with other objects 3195 // and we can't remove the merge 3196 for (EdgeIterator j(use); j.has_next(); j.next()) { 3197 PointsToNode* ptn = j.get(); 3198 if (ptn->is_JavaObject() && ptn != jobj) { 3199 Node* use_n = use->ideal_node(); 3200 3201 // These other local vars may point to multiple objects through a Phi 3202 // In this case we skip them and see if we can reduce the Phi. 3203 if (use_n->is_CastPP() || use_n->is_CheckCastPP()) { 3204 use_n = use_n->in(1); 3205 } 3206 3207 // If it's already a candidate or confirmed reducible merge we can skip verification 3208 if (candidates.member(use_n) || reducible_merges.member(use_n)) { 3209 continue; 3210 } 3211 3212 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) { 3213 candidates.push(use_n); 3214 } else { 3215 // Mark all objects as NSR if we can't remove the merge 3216 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn))); 3217 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj))); 3218 } 3219 } 3220 } 3221 if (!jobj->scalar_replaceable()) { 3222 return; 3223 } 3224 } 3225 3226 for (EdgeIterator j(jobj); j.has_next(); j.next()) { 3227 if (j.get()->is_Arraycopy()) { 3228 continue; 3229 } 3230 3231 // Non-escaping object node should point only to field nodes. 3232 FieldNode* field = j.get()->as_Field(); 3233 int offset = field->as_Field()->offset(); 3234 3235 // 4. An object is not scalar replaceable if it has a field with unknown 3236 // offset (array's element is accessed in loop). 3237 if (offset == Type::OffsetBot) { 3238 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset")); 3239 return; 3240 } 3241 // 5. Currently an object is not scalar replaceable if a LoadStore node 3242 // access its field since the field value is unknown after it. 3243 // 3244 Node* n = field->ideal_node(); 3245 3246 // Test for an unsafe access that was parsed as maybe off heap 3247 // (with a CheckCastPP to raw memory). 3248 assert(n->is_AddP(), "expect an address computation"); 3249 if (n->in(AddPNode::Base)->is_top() && 3250 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) { 3251 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected"); 3252 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected"); 3253 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access")); 3254 return; 3255 } 3256 3257 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3258 Node* u = n->fast_out(i); 3259 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) { 3260 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access")); 3261 return; 3262 } 3263 } 3264 3265 // 6. 
Or the address may point to more than one object. This may produce
3266 // a false positive result (the object is marked not scalar replaceable)
3267 // since the flow-insensitive escape analysis can't separate
3268 // the case when stores overwrite the field's value from the case
3269 // when stores happen on different control branches.
3270 //
3271 // Note: it will disable scalar replacement in some cases:
3272 //
3273 // Point p[] = new Point[1];
3274 // p[0] = new Point(); // Will not be scalar replaced
3275 //
3276 // but it will save us from incorrect optimizations in cases such as:
3277 //
3278 // Point p[] = new Point[1];
3279 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
3280 //
3281 if (field->base_count() > 1 && candidates.size() == 0) {
3282 if (has_non_reducible_merge(field, reducible_merges)) {
3283 for (BaseIterator i(field); i.has_next(); i.next()) {
3284 PointsToNode* base = i.get();
3285 // Don't take into account LocalVar nodes which
3286 // may point to only one object, which by now should also be
3287 // this field's base.
3288 if (base->is_JavaObject() && base != jobj) {
3289 // Mark all bases.
3290 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3291 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3292 }
3293 }
3294
3295 if (!jobj->scalar_replaceable()) {
3296 return;
3297 }
3298 }
3299 }
3300 }
3301
3302 // The candidate is truly a reducible merge only if none of the other
3303 // constraints ruled it as NSR. There could be multiple merges involving the
3304 // same jobj.
3305 assert(jobj->scalar_replaceable(), "sanity");
3306 for (uint i = 0; i < candidates.size(); i++ ) {
3307 Node* candidate = candidates.at(i);
3308 reducible_merges.push(candidate);
3309 }
3310 }
3311
3312 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3313 for (BaseIterator i(field); i.has_next(); i.next()) {
3314 Node* base = i.get()->ideal_node();
3315 if (base->is_Phi() && !reducible_merges.member(base)) {
3316 return true;
3317 }
3318 }
3319 return false;
3320 }
3321
3322 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3323 assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3324
3325 // Look for 'phis' that refer to 'jobj' as the last
3326 // remaining scalar replaceable input.
3327 uint reducible_merges_cnt = reducible_merges.size();
3328 for (uint i = 0; i < reducible_merges_cnt; i++) {
3329 Node* phi = reducible_merges.at(i);
3330
3331 // This Phi is still 'good' if it points to
3332 // at least one scalar replaceable object. Note that 'jobj'
3333 // should have been marked as NSR before calling this function.
3334 bool good_phi = false;
3335
3336 for (uint j = 1; j < phi->req(); j++) {
3337 JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3338 if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3339 good_phi = true;
3340 break;
3341 }
3342 }
3343
3344 if (!good_phi) {
3345 NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3346 reducible_merges.remove(i);
3347
3348 // Decrement the index because the 'remove' call above actually
3349 // moves the last entry of the list to position 'i'.
3350 i--; 3351 3352 reducible_merges_cnt--; 3353 } 3354 } 3355 } 3356 3357 // Propagate NSR (Not scalar replaceable) state. 3358 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) { 3359 int jobj_length = jobj_worklist.length(); 3360 bool found_nsr_alloc = true; 3361 while (found_nsr_alloc) { 3362 found_nsr_alloc = false; 3363 for (int next = 0; next < jobj_length; ++next) { 3364 JavaObjectNode* jobj = jobj_worklist.at(next); 3365 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) { 3366 PointsToNode* use = i.get(); 3367 if (use->is_Field()) { 3368 FieldNode* field = use->as_Field(); 3369 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 3370 assert(field->offset() != Type::OffsetBot, "sanity"); 3371 for (BaseIterator i(field); i.has_next(); i.next()) { 3372 PointsToNode* base = i.get(); 3373 // An object is not scalar replaceable if the field into which 3374 // it is stored has NSR base. 3375 if ((base != null_obj) && !base->scalar_replaceable()) { 3376 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 3377 // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible, 3378 // because there is no point in reducing a Phi that won't improve the number of SR 3379 // objects. 3380 revisit_reducible_phi_status(jobj, reducible_merges); 3381 found_nsr_alloc = true; 3382 break; 3383 } 3384 } 3385 } else if (use->is_LocalVar()) { 3386 Node* phi = use->ideal_node(); 3387 if (phi->Opcode() == Op_Phi && reducible_merges.member(phi) && !can_reduce_phi(phi->as_Phi())) { 3388 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is merged in a non-reducible phi")); 3389 reducible_merges.yank(phi); 3390 found_nsr_alloc = true; 3391 break; 3392 } 3393 } 3394 _compile->print_method(PHASE_EA_PROPAGATE_NSR_ITER, 5, jobj->ideal_node()); 3395 } 3396 } 3397 } 3398 } 3399 3400 #ifdef ASSERT 3401 void ConnectionGraph::verify_connection_graph( 3402 GrowableArray<PointsToNode*>& ptnodes_worklist, 3403 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 3404 GrowableArray<JavaObjectNode*>& java_objects_worklist, 3405 GrowableArray<Node*>& addp_worklist) { 3406 // Verify that graph is complete - no new edges could be added. 3407 int java_objects_length = java_objects_worklist.length(); 3408 int non_escaped_length = non_escaped_allocs_worklist.length(); 3409 int new_edges = 0; 3410 for (int next = 0; next < java_objects_length; ++next) { 3411 JavaObjectNode* ptn = java_objects_worklist.at(next); 3412 new_edges += add_java_object_edges(ptn, true); 3413 } 3414 assert(new_edges == 0, "graph was not complete"); 3415 // Verify that escape state is final. 3416 int length = non_escaped_allocs_worklist.length(); 3417 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist, /*print_method=*/ false); 3418 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 3419 (non_escaped_length == length) && 3420 (_worklist.length() == 0), "escape state was not final"); 3421 3422 // Verify fields information. 
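// Every oop Field created from an AddP must know all of its possible bases
// and must have at least one value edge (an initializing value, null or the
// phantom object).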
3423 int addp_length = addp_worklist.length(); 3424 for (int next = 0; next < addp_length; ++next ) { 3425 Node* n = addp_worklist.at(next); 3426 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 3427 if (field->is_oop()) { 3428 // Verify that field has all bases 3429 Node* base = get_addp_base(n); 3430 PointsToNode* ptn = ptnode_adr(base->_idx); 3431 if (ptn->is_JavaObject()) { 3432 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 3433 } else { 3434 assert(ptn->is_LocalVar(), "sanity"); 3435 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3436 PointsToNode* e = i.get(); 3437 if (e->is_JavaObject()) { 3438 assert(field->has_base(e->as_JavaObject()), "sanity"); 3439 } 3440 } 3441 } 3442 // Verify that all fields have initializing values. 3443 if (field->edge_count() == 0) { 3444 tty->print_cr("----------field does not have references----------"); 3445 field->dump(); 3446 for (BaseIterator i(field); i.has_next(); i.next()) { 3447 PointsToNode* base = i.get(); 3448 tty->print_cr("----------field has next base---------------------"); 3449 base->dump(); 3450 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 3451 tty->print_cr("----------base has fields-------------------------"); 3452 for (EdgeIterator j(base); j.has_next(); j.next()) { 3453 j.get()->dump(); 3454 } 3455 tty->print_cr("----------base has references---------------------"); 3456 for (UseIterator j(base); j.has_next(); j.next()) { 3457 j.get()->dump(); 3458 } 3459 } 3460 } 3461 for (UseIterator i(field); i.has_next(); i.next()) { 3462 i.get()->dump(); 3463 } 3464 assert(field->edge_count() > 0, "sanity"); 3465 } 3466 } 3467 } 3468 } 3469 #endif 3470 3471 // Optimize ideal graph. 3472 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 3473 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 3474 Compile* C = _compile; 3475 PhaseIterGVN* igvn = _igvn; 3476 if (EliminateLocks) { 3477 // Mark locks before changing ideal graph. 3478 int cnt = C->macro_count(); 3479 for (int i = 0; i < cnt; i++) { 3480 Node *n = C->macro_node(i); 3481 if (n->is_AbstractLock()) { // Lock and Unlock nodes 3482 AbstractLockNode* alock = n->as_AbstractLock(); 3483 if (!alock->is_non_esc_obj()) { 3484 const Type* obj_type = igvn->type(alock->obj_node()); 3485 if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) { 3486 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 3487 // The lock could be marked eliminated by lock coarsening 3488 // code during first IGVN before EA. Replace coarsened flag 3489 // to eliminate all associated locks/unlocks. 3490 #ifdef ASSERT 3491 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 3492 #endif 3493 alock->set_non_esc_obj(); 3494 } 3495 } 3496 } 3497 } 3498 } 3499 3500 if (OptimizePtrCompare) { 3501 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 3502 Node *n = ptr_cmp_worklist.at(i); 3503 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 3504 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2)); 3505 if (tcmp->singleton()) { 3506 Node* cmp = igvn->makecon(tcmp); 3507 #ifndef PRODUCT 3508 if (PrintOptimizePtrCompare) { 3509 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? 
"EQ" : "NotEQ")); 3510 if (Verbose) { 3511 n->dump(1); 3512 } 3513 } 3514 #endif 3515 igvn->replace_node(n, cmp); 3516 } 3517 } 3518 } 3519 3520 // For MemBarStoreStore nodes added in library_call.cpp, check 3521 // escape status of associated AllocateNode and optimize out 3522 // MemBarStoreStore node if the allocated object never escapes. 3523 for (int i = 0; i < storestore_worklist.length(); i++) { 3524 Node* storestore = storestore_worklist.at(i); 3525 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 3526 if (alloc->is_Allocate() && not_global_escape(alloc)) { 3527 if (alloc->in(AllocateNode::InlineType) != nullptr) { 3528 // Non-escaping inline type buffer allocations don't require a membar 3529 storestore->as_MemBar()->remove(_igvn); 3530 } else { 3531 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 3532 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 3533 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 3534 igvn->register_new_node_with_optimizer(mb); 3535 igvn->replace_node(storestore, mb); 3536 } 3537 } 3538 } 3539 } 3540 3541 // Atomic flat accesses on non-escaping objects can be optimized to non-atomic accesses 3542 void ConnectionGraph::optimize_flat_accesses(GrowableArray<SafePointNode*>& sfn_worklist) { 3543 PhaseIterGVN& igvn = *_igvn; 3544 bool delay = igvn.delay_transform(); 3545 igvn.set_delay_transform(true); 3546 igvn.C->for_each_flat_access([&](Node* n) { 3547 Node* base = n->is_LoadFlat() ? n->as_LoadFlat()->base() : n->as_StoreFlat()->base(); 3548 if (!not_global_escape(base)) { 3549 return; 3550 } 3551 3552 bool expanded; 3553 if (n->is_LoadFlat()) { 3554 expanded = n->as_LoadFlat()->expand_non_atomic(igvn); 3555 } else { 3556 expanded = n->as_StoreFlat()->expand_non_atomic(igvn); 3557 } 3558 if (expanded) { 3559 sfn_worklist.remove(n->as_SafePoint()); 3560 igvn.C->remove_flat_access(n); 3561 } 3562 }); 3563 igvn.set_delay_transform(delay); 3564 } 3565 3566 // Optimize objects compare. 3567 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) { 3568 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1] 3569 if (!OptimizePtrCompare) { 3570 return UNKNOWN; 3571 } 3572 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO 3573 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE 3574 3575 PointsToNode* ptn1 = ptnode_adr(left->_idx); 3576 PointsToNode* ptn2 = ptnode_adr(right->_idx); 3577 JavaObjectNode* jobj1 = unique_java_object(left); 3578 JavaObjectNode* jobj2 = unique_java_object(right); 3579 3580 // The use of this method during allocation merge reduction may cause 'left' 3581 // or 'right' be something (e.g., a Phi) that isn't in the connection graph or 3582 // that doesn't reference an unique java object. 3583 if (ptn1 == nullptr || ptn2 == nullptr || 3584 jobj1 == nullptr || jobj2 == nullptr) { 3585 return UNKNOWN; 3586 } 3587 3588 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 3589 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 3590 3591 // Check simple cases first. 3592 if (jobj1 != nullptr) { 3593 if (jobj1->escape_state() == PointsToNode::NoEscape) { 3594 if (jobj1 == jobj2) { 3595 // Comparing the same not escaping object. 3596 return EQ; 3597 } 3598 Node* obj = jobj1->ideal_node(); 3599 // Comparing not escaping allocation. 3600 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 3601 !ptn2->points_to(jobj1)) { 3602 return NE; // This includes nullness check. 
3603 } 3604 } 3605 } 3606 if (jobj2 != nullptr) { 3607 if (jobj2->escape_state() == PointsToNode::NoEscape) { 3608 Node* obj = jobj2->ideal_node(); 3609 // Comparing not escaping allocation. 3610 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 3611 !ptn1->points_to(jobj2)) { 3612 return NE; // This includes nullness check. 3613 } 3614 } 3615 } 3616 if (jobj1 != nullptr && jobj1 != phantom_obj && 3617 jobj2 != nullptr && jobj2 != phantom_obj && 3618 jobj1->ideal_node()->is_Con() && 3619 jobj2->ideal_node()->is_Con()) { 3620 // Klass or String constants compare. Need to be careful with 3621 // compressed pointers - compare types of ConN and ConP instead of nodes. 3622 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 3623 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 3624 if (t1->make_ptr() == t2->make_ptr()) { 3625 return EQ; 3626 } else { 3627 return NE; 3628 } 3629 } 3630 if (ptn1->meet(ptn2)) { 3631 return UNKNOWN; // Sets are not disjoint 3632 } 3633 3634 // Sets are disjoint. 3635 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 3636 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 3637 bool set1_has_null_ptr = ptn1->points_to(null_obj); 3638 bool set2_has_null_ptr = ptn2->points_to(null_obj); 3639 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 3640 (set2_has_unknown_ptr && set1_has_null_ptr)) { 3641 // Check nullness of unknown object. 3642 return UNKNOWN; 3643 } 3644 3645 // Disjointness by itself is not sufficient since 3646 // alias analysis is not complete for escaped objects. 3647 // Disjoint sets are definitely unrelated only when 3648 // at least one set has only not escaping allocations. 3649 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 3650 if (ptn1->non_escaping_allocation()) { 3651 return NE; 3652 } 3653 } 3654 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 3655 if (ptn2->non_escaping_allocation()) { 3656 return NE; 3657 } 3658 } 3659 return UNKNOWN; 3660 } 3661 3662 // Connection Graph construction functions. 
3663 3664 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 3665 PointsToNode* ptadr = _nodes.at(n->_idx); 3666 if (ptadr != nullptr) { 3667 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 3668 return; 3669 } 3670 Compile* C = _compile; 3671 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 3672 map_ideal_node(n, ptadr); 3673 } 3674 3675 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 3676 PointsToNode* ptadr = _nodes.at(n->_idx); 3677 if (ptadr != nullptr) { 3678 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 3679 return ptadr; 3680 } 3681 Compile* C = _compile; 3682 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 3683 map_ideal_node(n, ptadr); 3684 return ptadr; 3685 } 3686 3687 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 3688 PointsToNode* ptadr = _nodes.at(n->_idx); 3689 if (ptadr != nullptr) { 3690 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 3691 return; 3692 } 3693 bool unsafe = false; 3694 bool is_oop = is_oop_field(n, offset, &unsafe); 3695 if (unsafe) { 3696 es = PointsToNode::GlobalEscape; 3697 } 3698 Compile* C = _compile; 3699 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 3700 map_ideal_node(n, field); 3701 } 3702 3703 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 3704 PointsToNode* src, PointsToNode* dst) { 3705 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 3706 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 3707 PointsToNode* ptadr = _nodes.at(n->_idx); 3708 if (ptadr != nullptr) { 3709 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 3710 return; 3711 } 3712 Compile* C = _compile; 3713 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 3714 map_ideal_node(n, ptadr); 3715 // Add edge from arraycopy node to source object. 3716 (void)add_edge(ptadr, src); 3717 src->set_arraycopy_src(); 3718 // Add edge from destination object to arraycopy node. 3719 (void)add_edge(dst, ptadr); 3720 dst->set_arraycopy_dst(); 3721 } 3722 3723 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 3724 const Type* adr_type = n->as_AddP()->bottom_type(); 3725 int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot; 3726 BasicType bt = T_INT; 3727 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) { 3728 // Check only oop fields. 3729 if (!adr_type->isa_aryptr() || 3730 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 3731 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 3732 // OffsetBot is used to reference array's element. Ignore first AddP. 3733 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 3734 bt = T_OBJECT; 3735 } 3736 } 3737 } else if (offset != oopDesc::klass_offset_in_bytes()) { 3738 if (adr_type->isa_instptr()) { 3739 ciField* field = _compile->alias_type(adr_type->is_ptr())->field(); 3740 if (field != nullptr) { 3741 bt = field->layout_type(); 3742 } else { 3743 // Check for unsafe oop field access 3744 if (has_oop_node_outs(n)) { 3745 bt = T_OBJECT; 3746 (*unsafe) = true; 3747 } 3748 } 3749 } else if (adr_type->isa_aryptr()) { 3750 if (offset == arrayOopDesc::length_offset_in_bytes()) { 3751 // Ignore array length load. 3752 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 3753 // Ignore first AddP. 
3754 } else { 3755 const Type* elemtype = adr_type->is_aryptr()->elem(); 3756 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) { 3757 ciInlineKlass* vk = elemtype->inline_klass(); 3758 field_offset += vk->payload_offset(); 3759 ciField* field = vk->get_field_by_offset(field_offset, false); 3760 if (field != nullptr) { 3761 bt = field->layout_type(); 3762 } else { 3763 assert(field_offset == vk->payload_offset() + vk->null_marker_offset_in_payload(), "no field or null marker of %s at offset %d", vk->name()->as_utf8(), field_offset); 3764 bt = T_BOOLEAN; 3765 } 3766 } else { 3767 bt = elemtype->array_element_basic_type(); 3768 } 3769 } 3770 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 3771 // Allocation initialization, ThreadLocal field access, unsafe access 3772 if (has_oop_node_outs(n)) { 3773 bt = T_OBJECT; 3774 } 3775 } 3776 } 3777 // Note: T_NARROWOOP is not classed as a real reference type 3778 bool res = (is_reference_type(bt) || bt == T_NARROWOOP); 3779 assert(!has_oop_node_outs(n) || res, "sanity: AddP has oop outs, needs to be treated as oop field"); 3780 return res; 3781 } 3782 3783 bool ConnectionGraph::has_oop_node_outs(Node* n) { 3784 return n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3785 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3786 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3787 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n); 3788 } 3789 3790 // Returns unique pointed java object or null. 3791 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 3792 // If the node was created after the escape computation we can't answer. 3793 uint idx = n->_idx; 3794 if (idx >= nodes_size()) { 3795 return nullptr; 3796 } 3797 PointsToNode* ptn = ptnode_adr(idx); 3798 if (ptn == nullptr) { 3799 return nullptr; 3800 } 3801 if (ptn->is_JavaObject()) { 3802 return ptn->as_JavaObject(); 3803 } 3804 assert(ptn->is_LocalVar(), "sanity"); 3805 // Check all java objects it points to. 3806 JavaObjectNode* jobj = nullptr; 3807 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3808 PointsToNode* e = i.get(); 3809 if (e->is_JavaObject()) { 3810 if (jobj == nullptr) { 3811 jobj = e->as_JavaObject(); 3812 } else if (jobj != e) { 3813 return nullptr; 3814 } 3815 } 3816 } 3817 return jobj; 3818 } 3819 3820 // Return true if this node points only to non-escaping allocations. 3821 bool PointsToNode::non_escaping_allocation() { 3822 if (is_JavaObject()) { 3823 Node* n = ideal_node(); 3824 if (n->is_Allocate() || n->is_CallStaticJava()) { 3825 return (escape_state() == PointsToNode::NoEscape); 3826 } else { 3827 return false; 3828 } 3829 } 3830 assert(is_LocalVar(), "sanity"); 3831 // Check all java objects it points to. 3832 for (EdgeIterator i(this); i.has_next(); i.next()) { 3833 PointsToNode* e = i.get(); 3834 if (e->is_JavaObject()) { 3835 Node* n = e->ideal_node(); 3836 if ((e->escape_state() != PointsToNode::NoEscape) || 3837 !(n->is_Allocate() || n->is_CallStaticJava())) { 3838 return false; 3839 } 3840 } 3841 } 3842 return true; 3843 } 3844 3845 // Return true if we know the node does not escape globally. 3846 bool ConnectionGraph::not_global_escape(Node *n) { 3847 assert(!_collecting, "should not call during graph construction"); 3848 // If the node was created after the escape computation we can't answer. 
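// Be conservative and assume it may escape globally.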
3849 uint idx = n->_idx; 3850 if (idx >= nodes_size()) { 3851 return false; 3852 } 3853 PointsToNode* ptn = ptnode_adr(idx); 3854 if (ptn == nullptr) { 3855 return false; // not in congraph (e.g. ConI) 3856 } 3857 PointsToNode::EscapeState es = ptn->escape_state(); 3858 // If we have already computed a value, return it. 3859 if (es >= PointsToNode::GlobalEscape) { 3860 return false; 3861 } 3862 if (ptn->is_JavaObject()) { 3863 return true; // (es < PointsToNode::GlobalEscape); 3864 } 3865 assert(ptn->is_LocalVar(), "sanity"); 3866 // Check all java objects it points to. 3867 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3868 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) { 3869 return false; 3870 } 3871 } 3872 return true; 3873 } 3874 3875 // Return true if locked object does not escape globally 3876 // and locked code region (identified by BoxLockNode) is balanced: 3877 // all compiled code paths have corresponding Lock/Unlock pairs. 3878 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) { 3879 if (alock->is_balanced() && not_global_escape(alock->obj_node())) { 3880 if (EliminateNestedLocks) { 3881 // We can mark whole locking region as Local only when only 3882 // one object is used for locking. 3883 alock->box_node()->as_BoxLock()->set_local(); 3884 } 3885 return true; 3886 } 3887 return false; 3888 } 3889 3890 // Helper functions 3891 3892 // Return true if this node points to specified node or nodes it points to. 3893 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 3894 if (is_JavaObject()) { 3895 return (this == ptn); 3896 } 3897 assert(is_LocalVar() || is_Field(), "sanity"); 3898 for (EdgeIterator i(this); i.has_next(); i.next()) { 3899 if (i.get() == ptn) { 3900 return true; 3901 } 3902 } 3903 return false; 3904 } 3905 3906 // Return true if one node points to an other. 3907 bool PointsToNode::meet(PointsToNode* ptn) { 3908 if (this == ptn) { 3909 return true; 3910 } else if (ptn->is_JavaObject()) { 3911 return this->points_to(ptn->as_JavaObject()); 3912 } else if (this->is_JavaObject()) { 3913 return ptn->points_to(this->as_JavaObject()); 3914 } 3915 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 3916 int ptn_count = ptn->edge_count(); 3917 for (EdgeIterator i(this); i.has_next(); i.next()) { 3918 PointsToNode* this_e = i.get(); 3919 for (int j = 0; j < ptn_count; j++) { 3920 if (this_e == ptn->edge(j)) { 3921 return true; 3922 } 3923 } 3924 } 3925 return false; 3926 } 3927 3928 #ifdef ASSERT 3929 // Return true if bases point to this java object. 3930 bool FieldNode::has_base(JavaObjectNode* jobj) const { 3931 for (BaseIterator i(this); i.has_next(); i.next()) { 3932 if (i.get() == jobj) { 3933 return true; 3934 } 3935 } 3936 return false; 3937 } 3938 #endif 3939 3940 bool ConnectionGraph::is_captured_store_address(Node* addp) { 3941 // Handle simple case first. 
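// Either the address comes straight from an Allocate's raw oop projection,
// or it is a Phi and one of this AddP's stores is consumed by an Initialize.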
3942 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 3943 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 3944 return true; 3945 } else if (addp->in(AddPNode::Address)->is_Phi()) { 3946 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 3947 Node* addp_use = addp->fast_out(i); 3948 if (addp_use->is_Store()) { 3949 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 3950 if (addp_use->fast_out(j)->is_Initialize()) { 3951 return true; 3952 } 3953 } 3954 } 3955 } 3956 } 3957 return false; 3958 } 3959 3960 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 3961 const Type *adr_type = phase->type(adr); 3962 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 3963 // We are computing a raw address for a store captured by an Initialize 3964 // compute an appropriate address type. AddP cases #3 and #5 (see below). 3965 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 3966 assert(offs != Type::OffsetBot || 3967 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 3968 "offset must be a constant or it is initialization of array"); 3969 return offs; 3970 } 3971 return adr_type->is_ptr()->flat_offset(); 3972 } 3973 3974 Node* ConnectionGraph::get_addp_base(Node *addp) { 3975 assert(addp->is_AddP(), "must be AddP"); 3976 // 3977 // AddP cases for Base and Address inputs: 3978 // case #1. Direct object's field reference: 3979 // Allocate 3980 // | 3981 // Proj #5 ( oop result ) 3982 // | 3983 // CheckCastPP (cast to instance type) 3984 // | | 3985 // AddP ( base == address ) 3986 // 3987 // case #2. Indirect object's field reference: 3988 // Phi 3989 // | 3990 // CastPP (cast to instance type) 3991 // | | 3992 // AddP ( base == address ) 3993 // 3994 // case #3. Raw object's field reference for Initialize node. 3995 // Could have an additional Phi merging multiple allocations. 3996 // Allocate 3997 // | 3998 // Proj #5 ( oop result ) 3999 // top | 4000 // \ | 4001 // AddP ( base == top ) 4002 // 4003 // case #4. Array's element reference: 4004 // {CheckCastPP | CastPP} 4005 // | | | 4006 // | AddP ( array's element offset ) 4007 // | | 4008 // AddP ( array's offset ) 4009 // 4010 // case #5. Raw object's field reference for arraycopy stub call: 4011 // The inline_native_clone() case when the arraycopy stub is called 4012 // after the allocation before Initialize and CheckCastPP nodes. 4013 // Allocate 4014 // | 4015 // Proj #5 ( oop result ) 4016 // | | 4017 // AddP ( base == address ) 4018 // 4019 // case #6. Constant Pool, ThreadLocal, CastX2P, Klass, OSR buffer buf or 4020 // Raw object's field reference: 4021 // {ConP, ThreadLocal, CastX2P, raw Load, Parm0} 4022 // top | 4023 // \ | 4024 // AddP ( base == top ) 4025 // 4026 // case #7. Klass's field reference. 4027 // LoadKlass 4028 // | | 4029 // AddP ( base == address ) 4030 // 4031 // case #8. narrow Klass's field reference. 4032 // LoadNKlass 4033 // | 4034 // DecodeN 4035 // | | 4036 // AddP ( base == address ) 4037 // 4038 // case #9. Mixed unsafe access 4039 // {instance} 4040 // | 4041 // CheckCastPP (raw) 4042 // top | 4043 // \ | 4044 // AddP ( base == top ) 4045 // 4046 // case #10. Klass fetched with 4047 // LibraryCallKit::load_*_refined_array_klass() 4048 // which has en extra Phi. 
4049 // LoadKlass LoadKlass 4050 // | | 4051 // CastPP CastPP 4052 // \ / 4053 // Phi 4054 // top | 4055 // \ | 4056 // AddP ( base == top ) 4057 // 4058 Node *base = addp->in(AddPNode::Base); 4059 if (base->uncast()->is_top()) { // The AddP case #3, #6, #9, and #10. 4060 base = addp->in(AddPNode::Address); 4061 while (base->is_AddP()) { 4062 // Case #6 (unsafe access) may have several chained AddP nodes. 4063 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); 4064 base = base->in(AddPNode::Address); 4065 } 4066 if (base->Opcode() == Op_CheckCastPP && 4067 base->bottom_type()->isa_rawptr() && 4068 _igvn->type(base->in(1))->isa_oopptr()) { 4069 base = base->in(1); // Case #9 4070 } else { 4071 // Case #3, #6, and #10 4072 Node* uncast_base = base->uncast(); 4073 int opcode = uncast_base->Opcode(); 4074 assert(opcode == Op_ConP || opcode == Op_ThreadLocal || 4075 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || 4076 (_igvn->C->is_osr_compilation() && uncast_base->is_Parm() && uncast_base->as_Parm()->_con == TypeFunc::Parms)|| 4077 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) || 4078 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_klassptr() != nullptr)) || 4079 is_captured_store_address(addp) || 4080 is_load_array_klass_related(uncast_base), "sanity"); 4081 } 4082 } 4083 return base; 4084 } 4085 4086 #ifdef ASSERT 4087 // Case #10 4088 bool ConnectionGraph::is_load_array_klass_related(const Node* uncast_base) { 4089 if (!uncast_base->is_Phi() || uncast_base->req() != 3) { 4090 return false; 4091 } 4092 Node* in1 = uncast_base->in(1); 4093 Node* in2 = uncast_base->in(2); 4094 return in1->uncast()->Opcode() == Op_LoadKlass && 4095 in2->uncast()->Opcode() == Op_LoadKlass; 4096 } 4097 #endif 4098 4099 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { 4100 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); 4101 Node* addp2 = addp->raw_out(0); 4102 if (addp->outcnt() == 1 && addp2->is_AddP() && 4103 addp2->in(AddPNode::Base) == n && 4104 addp2->in(AddPNode::Address) == addp) { 4105 assert(addp->in(AddPNode::Base) == n, "expecting the same base"); 4106 // 4107 // Find array's offset to push it on worklist first and 4108 // as result process an array's element offset first (pushed second) 4109 // to avoid CastPP for the array's offset. 4110 // Otherwise the inserted CastPP (LocalVar) will point to what 4111 // the AddP (Field) points to. Which would be wrong since 4112 // the algorithm expects the CastPP has the same point as 4113 // as AddP's base CheckCastPP (LocalVar). 
4114 //
4115 // ArrayAllocation
4116 // |
4117 // CheckCastPP
4118 // |
4119 // memProj (from ArrayAllocation CheckCastPP)
4120 // | ||
4121 // | || Int (element index)
4122 // | || | ConI (log(element size))
4123 // | || | /
4124 // | || LShift
4125 // | || /
4126 // | AddP (array's element offset)
4127 // | |
4128 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
4129 // | / /
4130 // AddP (array's offset)
4131 // |
4132 // Load/Store (memory operation on array's element)
4133 //
4134 return addp2;
4135 }
4136 return nullptr;
4137 }
4138
4139 //
4140 // Adjust the type and inputs of an AddP which computes the
4141 // address of a field of an instance.
4142 //
4143 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
4144 PhaseGVN* igvn = _igvn;
4145 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
4146 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
4147 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
4148 if (t == nullptr) {
4149 // We are computing a raw address for a store captured by an Initialize:
4150 // compute an appropriate address type (cases #3 and #5).
4151 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
4152 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
4153 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
4154 assert(offs != Type::OffsetBot, "offset must be a constant");
4155 if (base_t->isa_aryptr() != nullptr) {
4156 // In the case of a flat inline type array, each field has its
4157 // own slice so we need to extract the field being accessed from
4158 // the address computation.
4159 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
4160 } else {
4161 t = base_t->add_offset(offs)->is_oopptr();
4162 }
4163 }
4164 int inst_id = base_t->instance_id();
4165 assert(!t->is_known_instance() || t->instance_id() == inst_id,
4166 "old type must be non-instance or match new type");
4167
4168 // The type 't' could be a subclass of 'base_t'.
4169 // As a result t->offset() could be larger than base_t's size and that will
4170 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
4171 // constructor verifies correctness of the offset.
4172 //
4173 // It could happen on a subclass's branch (from the type profiling
4174 // inlining) which was not eliminated during parsing since the exactness
4175 // of the allocation type was not propagated to the subclass type check.
4176 //
4177 // Or the type 't' might not be related to 'base_t' at all.
4178 // That could happen when the CHA type differs from the MDO type on a dead path
4179 // (for example, from an instanceof check) which is not collapsed during parsing.
4180 //
4181 // Do nothing for such an AddP node and don't process its users since
4182 // this code branch will go away.
4183 //
4184 if (!t->is_known_instance() &&
4185 !base_t->maybe_java_subtype_of(t)) {
4186 return false; // bail out
4187 }
4188 const TypePtr* tinst = base_t->add_offset(t->offset());
4189 if (tinst->isa_aryptr() && t->isa_aryptr()) {
4190 // In the case of a flat inline type array, each field has its
4191 // own slice so we need to keep track of the field being accessed.
4192 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get()); 4193 // Keep array properties (not flat/null-free) 4194 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr()); 4195 if (tinst == nullptr) { 4196 return false; // Skip dead path with inconsistent properties 4197 } 4198 } 4199 4200 // Do NOT remove the next line: ensure a new alias index is allocated 4201 // for the instance type. Note: C++ will not remove it since the call 4202 // has side effect. 4203 int alias_idx = _compile->get_alias_index(tinst); 4204 igvn->set_type(addp, tinst); 4205 // record the allocation in the node map 4206 set_map(addp, get_map(base->_idx)); 4207 // Set addp's Base and Address to 'base'. 4208 Node *abase = addp->in(AddPNode::Base); 4209 Node *adr = addp->in(AddPNode::Address); 4210 if (adr->is_Proj() && adr->in(0)->is_Allocate() && 4211 adr->in(0)->_idx == (uint)inst_id) { 4212 // Skip AddP cases #3 and #5. 4213 } else { 4214 assert(!abase->is_top(), "sanity"); // AddP case #3 4215 if (abase != base) { 4216 igvn->hash_delete(addp); 4217 addp->set_req(AddPNode::Base, base); 4218 if (abase == adr) { 4219 addp->set_req(AddPNode::Address, base); 4220 } else { 4221 // AddP case #4 (adr is array's element offset AddP node) 4222 #ifdef ASSERT 4223 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); 4224 assert(adr->is_AddP() && atype != nullptr && 4225 atype->instance_id() == inst_id, "array's element offset should be processed first"); 4226 #endif 4227 } 4228 igvn->hash_insert(addp); 4229 } 4230 } 4231 // Put on IGVN worklist since at least addp's type was changed above. 4232 record_for_optimizer(addp); 4233 return true; 4234 } 4235 4236 // 4237 // Create a new version of orig_phi if necessary. Returns either the newly 4238 // created phi or an existing phi. Sets create_new to indicate whether a new 4239 // phi was created. Cache the last newly created phi in the node map. 4240 // 4241 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) { 4242 Compile *C = _compile; 4243 PhaseGVN* igvn = _igvn; 4244 new_created = false; 4245 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); 4246 // nothing to do if orig_phi is bottom memory or matches alias_idx 4247 if (phi_alias_idx == alias_idx) { 4248 return orig_phi; 4249 } 4250 // Have we recently created a Phi for this alias index? 4251 PhiNode *result = get_map_phi(orig_phi->_idx); 4252 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) { 4253 return result; 4254 } 4255 // Previous check may fail when the same wide memory Phi was split into Phis 4256 // for different memory slices. Search all Phis for this region. 4257 if (result != nullptr) { 4258 Node* region = orig_phi->in(0); 4259 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 4260 Node* phi = region->fast_out(i); 4261 if (phi->is_Phi() && 4262 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 4263 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 4264 return phi->as_Phi(); 4265 } 4266 } 4267 } 4268 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 4269 if (C->do_escape_analysis() == true && !C->failing()) { 4270 // Retry compilation without escape analysis. 4271 // If this is the first failure, the sentinel string will "stick" 4272 // to the Compile object, and the C2Compiler will see it and retry. 4273 C->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4274 } 4275 return nullptr; 4276 } 4277 orig_phi_worklist.append_if_missing(orig_phi); 4278 const TypePtr *atype = C->get_adr_type(alias_idx); 4279 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype); 4280 C->copy_node_notes_to(result, orig_phi); 4281 igvn->set_type(result, result->bottom_type()); 4282 record_for_optimizer(result); 4283 set_map(orig_phi, result); 4284 new_created = true; 4285 return result; 4286 } 4287 4288 // 4289 // Return a new version of Memory Phi "orig_phi" with the inputs having the 4290 // specified alias index. 4291 // 4292 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) { 4293 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 4294 Compile *C = _compile; 4295 PhaseGVN* igvn = _igvn; 4296 bool new_phi_created; 4297 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 4298 if (!new_phi_created) { 4299 return result; 4300 } 4301 GrowableArray<PhiNode *> phi_list; 4302 GrowableArray<uint> cur_input; 4303 PhiNode *phi = orig_phi; 4304 uint idx = 1; 4305 bool finished = false; 4306 while(!finished) { 4307 while (idx < phi->req()) { 4308 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1); 4309 if (mem != nullptr && mem->is_Phi()) { 4310 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 4311 if (new_phi_created) { 4312 // found an phi for which we created a new split, push current one on worklist and begin 4313 // processing new one 4314 phi_list.push(phi); 4315 cur_input.push(idx); 4316 phi = mem->as_Phi(); 4317 result = newphi; 4318 idx = 1; 4319 continue; 4320 } else { 4321 mem = newphi; 4322 } 4323 } 4324 if (C->failing()) { 4325 return nullptr; 4326 } 4327 result->set_req(idx++, mem); 4328 } 4329 #ifdef ASSERT 4330 // verify that the new Phi has an input for each input of the original 4331 assert( phi->req() == result->req(), "must have same number of inputs."); 4332 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match"); 4333 #endif 4334 // Check if all new phi's inputs have specified alias index. 4335 // Otherwise use old phi. 4336 for (uint i = 1; i < phi->req(); i++) { 4337 Node* in = result->in(i); 4338 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond."); 4339 } 4340 // we have finished processing a Phi, see if there are any more to do 4341 finished = (phi_list.length() == 0 ); 4342 if (!finished) { 4343 phi = phi_list.pop(); 4344 idx = cur_input.pop(); 4345 PhiNode *prev_result = get_map_phi(phi->_idx); 4346 prev_result->set_req(idx++, result); 4347 result = prev_result; 4348 } 4349 } 4350 return result; 4351 } 4352 4353 // 4354 // The next methods are derived from methods in MemNode. 4355 // 4356 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 4357 Node *mem = mmem; 4358 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 4359 // means an array I have not precisely typed yet. Do not do any 4360 // alias stuff with it any time soon. 
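// Otherwise narrow the wide MergeMem to the memory state of the requested
// alias index.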
4361 if (toop->base() != Type::AnyPtr && 4362 !(toop->isa_instptr() && 4363 toop->is_instptr()->instance_klass()->is_java_lang_Object() && 4364 toop->offset() == Type::OffsetBot)) { 4365 mem = mmem->memory_at(alias_idx); 4366 // Update input if it is progress over what we have now 4367 } 4368 return mem; 4369 } 4370 4371 // 4372 // Move memory users to their memory slices. 4373 // 4374 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 4375 Compile* C = _compile; 4376 PhaseGVN* igvn = _igvn; 4377 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 4378 assert(tp != nullptr, "ptr type"); 4379 int alias_idx = C->get_alias_index(tp); 4380 int general_idx = C->get_general_index(alias_idx); 4381 4382 // Move users first 4383 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4384 Node* use = n->fast_out(i); 4385 if (use->is_MergeMem()) { 4386 MergeMemNode* mmem = use->as_MergeMem(); 4387 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 4388 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 4389 continue; // Nothing to do 4390 } 4391 // Replace previous general reference to mem node. 4392 uint orig_uniq = C->unique(); 4393 Node* m = find_inst_mem(n, general_idx, orig_phis); 4394 assert(orig_uniq == C->unique(), "no new nodes"); 4395 mmem->set_memory_at(general_idx, m); 4396 --imax; 4397 --i; 4398 } else if (use->is_MemBar()) { 4399 assert(!use->is_Initialize(), "initializing stores should not be moved"); 4400 if (use->req() > MemBarNode::Precedent && 4401 use->in(MemBarNode::Precedent) == n) { 4402 // Don't move related membars. 4403 record_for_optimizer(use); 4404 continue; 4405 } 4406 tp = use->as_MemBar()->adr_type()->isa_ptr(); 4407 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 4408 alias_idx == general_idx) { 4409 continue; // Nothing to do 4410 } 4411 // Move to general memory slice. 4412 uint orig_uniq = C->unique(); 4413 Node* m = find_inst_mem(n, general_idx, orig_phis); 4414 assert(orig_uniq == C->unique(), "no new nodes"); 4415 igvn->hash_delete(use); 4416 imax -= use->replace_edge(n, m, igvn); 4417 igvn->hash_insert(use); 4418 record_for_optimizer(use); 4419 --i; 4420 #ifdef ASSERT 4421 } else if (use->is_Mem()) { 4422 // Memory nodes should have new memory input. 4423 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 4424 assert(tp != nullptr, "ptr type"); 4425 int idx = C->get_alias_index(tp); 4426 assert(get_map(use->_idx) != nullptr || idx == alias_idx, 4427 "Following memory nodes should have new memory input or be on the same memory slice"); 4428 } else if (use->is_Phi()) { 4429 // Phi nodes should be split and moved already. 4430 tp = use->as_Phi()->adr_type()->isa_ptr(); 4431 assert(tp != nullptr, "ptr type"); 4432 int idx = C->get_alias_index(tp); 4433 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 4434 } else { 4435 use->dump(); 4436 assert(false, "should not be here"); 4437 #endif 4438 } 4439 } 4440 } 4441 4442 // 4443 // Search memory chain of "mem" to find a MemNode whose address 4444 // is the specified alias index. 4445 // 4446 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000 4447 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, uint rec_depth) { 4448 if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) { 4449 _compile->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4450 return nullptr; 4451 } 4452 if (orig_mem == nullptr) { 4453 return orig_mem; 4454 } 4455 Compile* C = _compile; 4456 PhaseGVN* igvn = _igvn; 4457 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); 4458 bool is_instance = (toop != nullptr) && toop->is_known_instance(); 4459 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); 4460 Node *prev = nullptr; 4461 Node *result = orig_mem; 4462 while (prev != result) { 4463 prev = result; 4464 if (result == start_mem) { 4465 break; // hit one of our sentinels 4466 } 4467 if (result->is_Mem()) { 4468 const Type *at = igvn->type(result->in(MemNode::Address)); 4469 if (at == Type::TOP) { 4470 break; // Dead 4471 } 4472 assert (at->isa_ptr() != nullptr, "pointer type required."); 4473 int idx = C->get_alias_index(at->is_ptr()); 4474 if (idx == alias_idx) { 4475 break; // Found 4476 } 4477 if (!is_instance && (at->isa_oopptr() == nullptr || 4478 !at->is_oopptr()->is_known_instance())) { 4479 break; // Do not skip store to general memory slice. 4480 } 4481 result = result->in(MemNode::Memory); 4482 } 4483 if (!is_instance) { 4484 continue; // don't search further for non-instance types 4485 } 4486 // skip over a call which does not affect this memory slice 4487 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 4488 Node *proj_in = result->in(0); 4489 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { 4490 break; // hit one of our sentinels 4491 } else if (proj_in->is_Call()) { 4492 // ArrayCopy node processed here as well 4493 CallNode *call = proj_in->as_Call(); 4494 if (!call->may_modify(toop, igvn)) { 4495 result = call->in(TypeFunc::Memory); 4496 } 4497 } else if (proj_in->is_Initialize()) { 4498 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); 4499 // Stop if this is the initialization for the object instance which 4500 // which contains this memory slice, otherwise skip over it. 4501 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { 4502 result = proj_in->in(TypeFunc::Memory); 4503 } else if (C->get_alias_index(result->adr_type()) != alias_idx) { 4504 assert(C->get_general_index(alias_idx) == C->get_alias_index(result->adr_type()), "should be projection for the same field/array element"); 4505 result = get_map(result->_idx); 4506 assert(result != nullptr, "new projection should have been allocated"); 4507 break; 4508 } 4509 } else if (proj_in->is_MemBar()) { 4510 // Check if there is an array copy for a clone 4511 // Step over GC barrier when ReduceInitialCardMarks is disabled 4512 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4513 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 4514 4515 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 4516 // Stop if it is a clone 4517 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 4518 if (ac->may_modify(toop, igvn)) { 4519 break; 4520 } 4521 } 4522 result = proj_in->in(TypeFunc::Memory); 4523 } 4524 } else if (result->is_MergeMem()) { 4525 MergeMemNode *mmem = result->as_MergeMem(); 4526 result = step_through_mergemem(mmem, alias_idx, toop); 4527 if (result == mmem->base_memory()) { 4528 // Didn't find instance memory, search through general slice recursively. 
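// Whatever is found is then installed as this MergeMem's memory for
// alias_idx below.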
4529 result = mmem->memory_at(C->get_general_index(alias_idx)); 4530 result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1); 4531 if (C->failing()) { 4532 return nullptr; 4533 } 4534 mmem->set_memory_at(alias_idx, result); 4535 } 4536 } else if (result->is_Phi() && 4537 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 4538 Node *un = result->as_Phi()->unique_input(igvn); 4539 if (un != nullptr) { 4540 orig_phis.append_if_missing(result->as_Phi()); 4541 result = un; 4542 } else { 4543 break; 4544 } 4545 } else if (result->is_ClearArray()) { 4546 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 4547 // Can not bypass initialization of the instance 4548 // we are looking for. 4549 break; 4550 } 4551 // Otherwise skip it (the call updated 'result' value). 4552 } else if (result->Opcode() == Op_SCMemProj) { 4553 Node* mem = result->in(0); 4554 Node* adr = nullptr; 4555 if (mem->is_LoadStore()) { 4556 adr = mem->in(MemNode::Address); 4557 } else { 4558 assert(mem->Opcode() == Op_EncodeISOArray || 4559 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 4560 adr = mem->in(3); // Memory edge corresponds to destination array 4561 } 4562 const Type *at = igvn->type(adr); 4563 if (at != Type::TOP) { 4564 assert(at->isa_ptr() != nullptr, "pointer type required."); 4565 int idx = C->get_alias_index(at->is_ptr()); 4566 if (idx == alias_idx) { 4567 // Assert in debug mode 4568 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 4569 break; // In product mode return SCMemProj node 4570 } 4571 } 4572 result = mem->in(MemNode::Memory); 4573 } else if (result->Opcode() == Op_StrInflatedCopy) { 4574 Node* adr = result->in(3); // Memory edge corresponds to destination array 4575 const Type *at = igvn->type(adr); 4576 if (at != Type::TOP) { 4577 assert(at->isa_ptr() != nullptr, "pointer type required."); 4578 int idx = C->get_alias_index(at->is_ptr()); 4579 if (idx == alias_idx) { 4580 // Assert in debug mode 4581 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 4582 break; // In product mode return SCMemProj node 4583 } 4584 } 4585 result = result->in(MemNode::Memory); 4586 } 4587 } 4588 if (result->is_Phi()) { 4589 PhiNode *mphi = result->as_Phi(); 4590 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 4591 const TypePtr *t = mphi->adr_type(); 4592 if (!is_instance) { 4593 // Push all non-instance Phis on the orig_phis worklist to update inputs 4594 // during Phase 4 if needed. 4595 orig_phis.append_if_missing(mphi); 4596 } else if (C->get_alias_index(t) != alias_idx) { 4597 // Create a new Phi with the specified alias index type. 4598 result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1); 4599 } 4600 } 4601 // the result is either MemNode, PhiNode, InitializeNode. 4602 return result; 4603 } 4604 4605 // 4606 // Convert the types of non-escaped object to instance types where possible, 4607 // propagate the new type information through the graph, and update memory 4608 // edges and MergeMem inputs to reflect the new type. 4609 // 4610 // We start with allocations (and calls which may be allocations) on alloc_worklist. 4611 // The processing is done in 4 phases: 4612 // 4613 // Phase 1: Process possible allocations from alloc_worklist. Create instance 4614 // types for the CheckCastPP for allocations where possible. 
//            Propagate the new types through users as follows:
//               casts and Phi:  push users on alloc_worklist
//               AddP:  cast Base and Address inputs to the instance type
//                      push any AddP users on alloc_worklist and push any memnode
//                      users onto memnode_worklist.
//  Phase 2:  Process MemNode's from memnode_worklist. Compute the new address
//            type and search the Memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with
//            the appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:   push on memnode_worklist
//               MergeMem:  push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index.
//  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is non-escaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10  Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10  Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ...
alias_index=4 4693 // 4694 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, 4695 GrowableArray<ArrayCopyNode*> &arraycopy_worklist, 4696 GrowableArray<MergeMemNode*> &mergemem_worklist, 4697 Unique_Node_List &reducible_merges) { 4698 DEBUG_ONLY(Unique_Node_List reduced_merges;) 4699 GrowableArray<Node *> memnode_worklist; 4700 GrowableArray<PhiNode *> orig_phis; 4701 PhaseIterGVN *igvn = _igvn; 4702 uint new_index_start = (uint) _compile->num_alias_types(); 4703 VectorSet visited; 4704 ideal_nodes.clear(); // Reset for use with set_map/get_map. 4705 4706 // Phase 1: Process possible allocations from alloc_worklist. 4707 // Create instance types for the CheckCastPP for allocations where possible. 4708 // 4709 // (Note: don't forget to change the order of the second AddP node on 4710 // the alloc_worklist if the order of the worklist processing is changed, 4711 // see the comment in find_second_addp().) 4712 // 4713 while (alloc_worklist.length() != 0) { 4714 Node *n = alloc_worklist.pop(); 4715 uint ni = n->_idx; 4716 if (n->is_Call()) { 4717 CallNode *alloc = n->as_Call(); 4718 // copy escape information to call node 4719 PointsToNode* ptn = ptnode_adr(alloc->_idx); 4720 PointsToNode::EscapeState es = ptn->escape_state(); 4721 // We have an allocation or call which returns a Java object, 4722 // see if it is non-escaped. 4723 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) { 4724 continue; 4725 } 4726 // Find CheckCastPP for the allocate or for the return value of a call 4727 n = alloc->result_cast(); 4728 if (n == nullptr) { // No uses except Initialize node 4729 if (alloc->is_Allocate()) { 4730 // Set the scalar_replaceable flag for allocation 4731 // so it could be eliminated if it has no uses. 4732 alloc->as_Allocate()->_is_scalar_replaceable = true; 4733 } 4734 continue; 4735 } 4736 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 4737 // we could reach here for allocate case if one init is associated with many allocs. 4738 if (alloc->is_Allocate()) { 4739 alloc->as_Allocate()->_is_scalar_replaceable = false; 4740 } 4741 continue; 4742 } 4743 4744 // The inline code for Object.clone() casts the allocation result to 4745 // java.lang.Object and then to the actual type of the allocated 4746 // object. Detect this case and use the second cast. 4747 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 4748 // the allocation result is cast to java.lang.Object and then 4749 // to the actual Array type. 4750 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 4751 && (alloc->is_AllocateArray() || 4752 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) { 4753 Node *cast2 = nullptr; 4754 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4755 Node *use = n->fast_out(i); 4756 if (use->is_CheckCastPP()) { 4757 cast2 = use; 4758 break; 4759 } 4760 } 4761 if (cast2 != nullptr) { 4762 n = cast2; 4763 } else { 4764 // Non-scalar replaceable if the allocation type is unknown statically 4765 // (reflection allocation), the object can't be restored during 4766 // deoptimization without precise type. 4767 continue; 4768 } 4769 } 4770 4771 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 4772 if (t == nullptr) { 4773 continue; // not a TypeOopPtr 4774 } 4775 if (!t->klass_is_exact()) { 4776 continue; // not an unique type 4777 } 4778 if (alloc->is_Allocate()) { 4779 // Set the scalar_replaceable flag for allocation 4780 // so it could be eliminated. 
4781 alloc->as_Allocate()->_is_scalar_replaceable = true; 4782 } 4783 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state 4784 // in order for an object to be scalar-replaceable, it must be: 4785 // - a direct allocation (not a call returning an object) 4786 // - non-escaping 4787 // - eligible to be a unique type 4788 // - not determined to be ineligible by escape analysis 4789 set_map(alloc, n); 4790 set_map(n, alloc); 4791 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 4792 igvn->hash_delete(n); 4793 igvn->set_type(n, tinst); 4794 n->raise_bottom_type(tinst); 4795 igvn->hash_insert(n); 4796 record_for_optimizer(n); 4797 // Allocate an alias index for the header fields. Accesses to 4798 // the header emitted during macro expansion wouldn't have 4799 // correct memory state otherwise. 4800 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); 4801 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); 4802 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 4803 // Add a new NarrowMem projection for each existing NarrowMem projection with new adr type 4804 InitializeNode* init = alloc->as_Allocate()->initialization(); 4805 assert(init != nullptr, "can't find Initialization node for this Allocate node"); 4806 auto process_narrow_proj = [&](NarrowMemProjNode* proj) { 4807 const TypePtr* adr_type = proj->adr_type(); 4808 const TypePtr* new_adr_type = tinst->with_offset(adr_type->offset()); 4809 if (adr_type->isa_aryptr()) { 4810 // In the case of a flat inline type array, each field has its own slice so we need a 4811 // NarrowMemProj for each field of the flat array elements 4812 new_adr_type = new_adr_type->is_aryptr()->with_field_offset(adr_type->is_aryptr()->field_offset().get()); 4813 } 4814 if (adr_type != new_adr_type && !init->already_has_narrow_mem_proj_with_adr_type(new_adr_type)) { 4815 DEBUG_ONLY( uint alias_idx = _compile->get_alias_index(new_adr_type); ) 4816 assert(_compile->get_general_index(alias_idx) == _compile->get_alias_index(adr_type), "new adr type should be narrowed down from existing adr type"); 4817 NarrowMemProjNode* new_proj = new NarrowMemProjNode(init, new_adr_type); 4818 igvn->set_type(new_proj, new_proj->bottom_type()); 4819 record_for_optimizer(new_proj); 4820 set_map(proj, new_proj); // record it so ConnectionGraph::find_inst_mem() can find it 4821 } 4822 }; 4823 init->for_each_narrow_mem_proj_with_new_uses(process_narrow_proj); 4824 4825 // First, put on the worklist all Field edges from Connection Graph 4826 // which is more accurate than putting immediate users from Ideal Graph. 4827 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 4828 PointsToNode* tgt = e.get(); 4829 if (tgt->is_Arraycopy()) { 4830 continue; 4831 } 4832 Node* use = tgt->ideal_node(); 4833 assert(tgt->is_Field() && use->is_AddP(), 4834 "only AddP nodes are Field edges in CG"); 4835 if (use->outcnt() > 0) { // Don't process dead nodes 4836 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 4837 if (addp2 != nullptr) { 4838 assert(alloc->is_AllocateArray(),"array allocation was expected"); 4839 alloc_worklist.append_if_missing(addp2); 4840 } 4841 alloc_worklist.append_if_missing(use); 4842 } 4843 } 4844 4845 // An allocation may have an Initialize which has raw stores. Scan 4846 // the users of the raw allocation result and push AddP users 4847 // on alloc_worklist. 
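      // For illustration only (the node numbers below are invented, not taken
      // from a real graph): the raw result is the TypeFunc::Parms projection
      // of the Allocate, and the Initialize's raw stores address it through
      // AddP nodes:
      //
      //   60  Allocate   ...
      //   61  Proj       60   (TypeFunc::Parms, raw oop)
      //   62  AddP       _  61  61  #12
      //   63  StoreI     ctl  mem  62  val
      //
      // Only the AddP users (and MemBars such as the Initialize) are pushed
      // from this scan.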
4848 Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); 4849 assert (raw_result != nullptr, "must have an allocation result"); 4850 for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { 4851 Node *use = raw_result->fast_out(i); 4852 if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes 4853 Node* addp2 = find_second_addp(use, raw_result); 4854 if (addp2 != nullptr) { 4855 assert(alloc->is_AllocateArray(),"array allocation was expected"); 4856 alloc_worklist.append_if_missing(addp2); 4857 } 4858 alloc_worklist.append_if_missing(use); 4859 } else if (use->is_MemBar()) { 4860 memnode_worklist.append_if_missing(use); 4861 } 4862 } 4863 } 4864 } else if (n->is_AddP()) { 4865 if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) { 4866 // This AddP will go away when we reduce the Phi 4867 continue; 4868 } 4869 Node* addp_base = get_addp_base(n); 4870 JavaObjectNode* jobj = unique_java_object(addp_base); 4871 if (jobj == nullptr || jobj == phantom_obj) { 4872 #ifdef ASSERT 4873 ptnode_adr(get_addp_base(n)->_idx)->dump(); 4874 ptnode_adr(n->_idx)->dump(); 4875 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); 4876 #endif 4877 _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4878 return; 4879 } 4880 Node *base = get_map(jobj->idx()); // CheckCastPP node 4881 if (!split_AddP(n, base)) continue; // wrong type from dead path 4882 } else if (n->is_Phi() || 4883 n->is_CheckCastPP() || 4884 n->is_EncodeP() || 4885 n->is_DecodeN() || 4886 (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { 4887 if (visited.test_set(n->_idx)) { 4888 assert(n->is_Phi(), "loops only through Phi's"); 4889 continue; // already processed 4890 } 4891 // Reducible Phi's will be removed from the graph after split_unique_types 4892 // finishes. For now we just try to split out the SR inputs of the merge. 4893 Node* parent = n->in(1); 4894 if (reducible_merges.member(n)) { 4895 reduce_phi(n->as_Phi(), alloc_worklist); 4896 #ifdef ASSERT 4897 if (VerifyReduceAllocationMerges) { 4898 reduced_merges.push(n); 4899 } 4900 #endif 4901 continue; 4902 } else if (reducible_merges.member(parent)) { 4903 // 'n' is an user of a reducible merge (a Phi). It will be simplified as 4904 // part of reduce_merge. 4905 continue; 4906 } 4907 JavaObjectNode* jobj = unique_java_object(n); 4908 if (jobj == nullptr || jobj == phantom_obj) { 4909 #ifdef ASSERT 4910 ptnode_adr(n->_idx)->dump(); 4911 assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); 4912 #endif 4913 _compile->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); 4914 return; 4915 } else { 4916 Node *val = get_map(jobj->idx()); // CheckCastPP node 4917 TypeNode *tn = n->as_Type(); 4918 const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); 4919 assert(tinst != nullptr && tinst->is_known_instance() && 4920 tinst->instance_id() == jobj->idx() , "instance type expected."); 4921 4922 const Type *tn_type = igvn->type(tn); 4923 const TypeOopPtr *tn_t; 4924 if (tn_type->isa_narrowoop()) { 4925 tn_t = tn_type->make_ptr()->isa_oopptr(); 4926 } else { 4927 tn_t = tn_type->isa_oopptr(); 4928 } 4929 if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { 4930 if (tn_t->isa_aryptr()) { 4931 // Keep array properties (not flat/null-free) 4932 tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr()); 4933 if (tinst == nullptr) { 4934 continue; // Skip dead path with inconsistent properties 4935 } 4936 } 4937 if (tn_type->isa_narrowoop()) { 4938 tn_type = tinst->make_narrowoop(); 4939 } else { 4940 tn_type = tinst; 4941 } 4942 igvn->hash_delete(tn); 4943 igvn->set_type(tn, tn_type); 4944 tn->set_type(tn_type); 4945 igvn->hash_insert(tn); 4946 record_for_optimizer(n); 4947 } else { 4948 assert(tn_type == TypePtr::NULL_PTR || 4949 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)), 4950 "unexpected type"); 4951 continue; // Skip dead path with different type 4952 } 4953 } 4954 } else { 4955 DEBUG_ONLY(n->dump();) 4956 assert(false, "EA: unexpected node"); 4957 continue; 4958 } 4959 // push allocation's users on appropriate worklist 4960 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4961 Node *use = n->fast_out(i); 4962 if (use->is_Mem() && use->in(MemNode::Address) == n) { 4963 // Load/store to instance's field 4964 memnode_worklist.append_if_missing(use); 4965 } else if (use->is_MemBar()) { 4966 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 4967 memnode_worklist.append_if_missing(use); 4968 } 4969 } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes 4970 Node* addp2 = find_second_addp(use, n); 4971 if (addp2 != nullptr) { 4972 alloc_worklist.append_if_missing(addp2); 4973 } 4974 alloc_worklist.append_if_missing(use); 4975 } else if (use->is_Phi() || 4976 use->is_CheckCastPP() || 4977 use->is_EncodeNarrowPtr() || 4978 use->is_DecodeNarrowPtr() || 4979 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { 4980 alloc_worklist.append_if_missing(use); 4981 #ifdef ASSERT 4982 } else if (use->is_Mem()) { 4983 assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); 4984 } else if (use->is_MergeMem()) { 4985 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 4986 } else if (use->is_SafePoint()) { 4987 // Look for MergeMem nodes for calls which reference unique allocation 4988 // (through CheckCastPP nodes) even for debug info. 
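        // In other words, even a SafePoint that merely keeps the unique
        // allocation alive in its debug info is expected to have its MergeMem
        // already sitting on mergemem_worklist.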
4989 Node* m = use->in(TypeFunc::Memory); 4990 if (m->is_MergeMem()) { 4991 assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 4992 } 4993 } else if (use->Opcode() == Op_EncodeISOArray) { 4994 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 4995 // EncodeISOArray overwrites destination array 4996 memnode_worklist.append_if_missing(use); 4997 } 4998 } else if (use->Opcode() == Op_Return) { 4999 // Allocation is referenced by field of returned inline type 5000 assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode"); 5001 } else { 5002 uint op = use->Opcode(); 5003 if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) && 5004 (use->in(MemNode::Memory) == n)) { 5005 // They overwrite memory edge corresponding to destination array, 5006 memnode_worklist.append_if_missing(use); 5007 } else if (!(op == Op_CmpP || op == Op_Conv2B || 5008 op == Op_CastP2X || 5009 op == Op_FastLock || op == Op_AryEq || 5010 op == Op_StrComp || op == Op_CountPositives || 5011 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || 5012 op == Op_StrEquals || op == Op_VectorizedHashCode || 5013 op == Op_StrIndexOf || op == Op_StrIndexOfChar || 5014 op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck || 5015 op == Op_ReinterpretS2HF || 5016 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { 5017 n->dump(); 5018 use->dump(); 5019 assert(false, "EA: missing allocation reference path"); 5020 } 5021 #endif 5022 } 5023 } 5024 5025 } 5026 5027 #ifdef ASSERT 5028 if (VerifyReduceAllocationMerges) { 5029 for (uint i = 0; i < reducible_merges.size(); i++) { 5030 Node* phi = reducible_merges.at(i); 5031 5032 if (!reduced_merges.member(phi)) { 5033 phi->dump(2); 5034 phi->dump(-2); 5035 assert(false, "This reducible merge wasn't reduced."); 5036 } 5037 5038 // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts. 5039 for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) { 5040 Node* use = phi->fast_out(j); 5041 if (!use->is_SafePoint() && !use->is_CastPP()) { 5042 phi->dump(2); 5043 phi->dump(-2); 5044 assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt()); 5045 } 5046 } 5047 } 5048 } 5049 #endif 5050 5051 // Go over all ArrayCopy nodes and if one of the inputs has a unique 5052 // type, record it in the ArrayCopy node so we know what memory this 5053 // node uses/modified. 5054 for (int next = 0; next < arraycopy_worklist.length(); next++) { 5055 ArrayCopyNode* ac = arraycopy_worklist.at(next); 5056 Node* dest = ac->in(ArrayCopyNode::Dest); 5057 if (dest->is_AddP()) { 5058 dest = get_addp_base(dest); 5059 } 5060 JavaObjectNode* jobj = unique_java_object(dest); 5061 if (jobj != nullptr) { 5062 Node *base = get_map(jobj->idx()); 5063 if (base != nullptr) { 5064 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 5065 ac->_dest_type = base_t; 5066 } 5067 } 5068 Node* src = ac->in(ArrayCopyNode::Src); 5069 if (src->is_AddP()) { 5070 src = get_addp_base(src); 5071 } 5072 jobj = unique_java_object(src); 5073 if (jobj != nullptr) { 5074 Node* base = get_map(jobj->idx()); 5075 if (base != nullptr) { 5076 const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); 5077 ac->_src_type = base_t; 5078 } 5079 } 5080 } 5081 5082 // New alias types were created in split_AddP(). 
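  // The instance alias indexes created during Phase 1 therefore form the
  // half-open range [new_index_start, new_index_end); Phase 3 below walks
  // exactly this range when filling in MergeMem slices.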
5083 uint new_index_end = (uint) _compile->num_alias_types(); 5084 5085 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_1, 5); 5086 5087 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 5088 // compute new values for Memory inputs (the Memory inputs are not 5089 // actually updated until phase 4.) 5090 if (memnode_worklist.length() == 0) 5091 return; // nothing to do 5092 while (memnode_worklist.length() != 0) { 5093 Node *n = memnode_worklist.pop(); 5094 if (visited.test_set(n->_idx)) { 5095 continue; 5096 } 5097 if (n->is_Phi() || n->is_ClearArray()) { 5098 // we don't need to do anything, but the users must be pushed 5099 } else if (n->is_MemBar()) { // MemBar nodes 5100 if (!n->is_Initialize()) { // memory projections for Initialize pushed below (so we get to all their uses) 5101 // we don't need to do anything, but the users must be pushed 5102 n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); 5103 if (n == nullptr) { 5104 continue; 5105 } 5106 } 5107 } else if (n->is_CallLeaf()) { 5108 // Runtime calls with narrow memory input (no MergeMem node) 5109 // get the memory projection 5110 n = n->as_Call()->proj_out_or_null(TypeFunc::Memory); 5111 if (n == nullptr) { 5112 continue; 5113 } 5114 } else if (n->Opcode() == Op_StrInflatedCopy) { 5115 // Check direct uses of StrInflatedCopy. 5116 // It is memory type Node - no special SCMemProj node. 5117 } else if (n->Opcode() == Op_StrCompressedCopy || 5118 n->Opcode() == Op_EncodeISOArray) { 5119 // get the memory projection 5120 n = n->find_out_with(Op_SCMemProj); 5121 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required"); 5122 } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr && 5123 strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) { 5124 n = n->as_CallLeaf()->proj_out(TypeFunc::Memory); 5125 } else if (n->is_Proj()) { 5126 assert(n->in(0)->is_Initialize(), "we only push memory projections for Initialize"); 5127 } else { 5128 #ifdef ASSERT 5129 if (!n->is_Mem()) { 5130 n->dump(); 5131 } 5132 assert(n->is_Mem(), "memory node required."); 5133 #endif 5134 Node *addr = n->in(MemNode::Address); 5135 const Type *addr_t = igvn->type(addr); 5136 if (addr_t == Type::TOP) { 5137 continue; 5138 } 5139 assert (addr_t->isa_ptr() != nullptr, "pointer type required."); 5140 int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); 5141 assert ((uint)alias_idx < new_index_end, "wrong alias index"); 5142 Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); 5143 if (_compile->failing()) { 5144 return; 5145 } 5146 if (mem != n->in(MemNode::Memory)) { 5147 // We delay the memory edge update since we need old one in 5148 // MergeMem code below when instances memory slices are separated. 
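        // Only the node map is updated here (via set_map); the actual
        // MemNode::Memory edge is rewired in Phase 4, after all MergeMem
        // nodes have been processed.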
5149 set_map(n, mem); 5150 } 5151 if (n->is_Load()) { 5152 continue; // don't push users 5153 } else if (n->is_LoadStore()) { 5154 // get the memory projection 5155 n = n->find_out_with(Op_SCMemProj); 5156 assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required"); 5157 } 5158 } 5159 // push user on appropriate worklist 5160 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 5161 Node *use = n->fast_out(i); 5162 if (use->is_Phi() || use->is_ClearArray()) { 5163 memnode_worklist.append_if_missing(use); 5164 } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { 5165 memnode_worklist.append_if_missing(use); 5166 } else if (use->is_MemBar() || use->is_CallLeaf()) { 5167 if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge 5168 memnode_worklist.append_if_missing(use); 5169 } 5170 } else if (use->is_Proj()) { 5171 assert(n->is_Initialize(), "We only push projections of Initialize"); 5172 if (use->as_Proj()->_con == TypeFunc::Memory) { // Ignore precedent edge 5173 memnode_worklist.append_if_missing(use); 5174 } 5175 #ifdef ASSERT 5176 } else if (use->is_Mem()) { 5177 assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); 5178 } else if (use->is_MergeMem()) { 5179 assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); 5180 } else if (use->Opcode() == Op_EncodeISOArray) { 5181 if (use->in(MemNode::Memory) == n || use->in(3) == n) { 5182 // EncodeISOArray overwrites destination array 5183 memnode_worklist.append_if_missing(use); 5184 } 5185 } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr && 5186 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) { 5187 // store_unknown_inline overwrites destination array 5188 memnode_worklist.append_if_missing(use); 5189 } else { 5190 uint op = use->Opcode(); 5191 if ((use->in(MemNode::Memory) == n) && 5192 (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) { 5193 // They overwrite memory edge corresponding to destination array, 5194 memnode_worklist.append_if_missing(use); 5195 } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) || 5196 op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives || 5197 op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode || 5198 op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) { 5199 n->dump(); 5200 use->dump(); 5201 assert(false, "EA: missing memory path"); 5202 } 5203 #endif 5204 } 5205 } 5206 } 5207 5208 // Phase 3: Process MergeMem nodes from mergemem_worklist. 5209 // Walk each memory slice moving the first node encountered of each 5210 // instance type to the input corresponding to its alias index. 5211 uint length = mergemem_worklist.length(); 5212 for( uint next = 0; next < length; ++next ) { 5213 MergeMemNode* nmm = mergemem_worklist.at(next); 5214 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 5215 // Note: we don't want to use MergeMemStream here because we only want to 5216 // scan inputs which exist at the start, not ones we add during processing. 5217 // Note 2: MergeMem may already contains instance memory slices added 5218 // during find_inst_mem() call when memory nodes were processed above. 
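    // Roughly, the per-MergeMem work below is: re-sort existing inputs whose
    // address type has become more precise, then compute a memory state for
    // every new instance slice in [new_index_start, new_index_end), bailing
    // out if the live node count gets too close to the limit.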
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = nullptr;
      if (mem == nullptr || mem->is_top()) {
        continue;
      }
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type* at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert (at->isa_ptr() != nullptr, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == nullptr) {
              cur = mem;
            }
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }

    // If we have crossed the 3/4 point of the max node limit it's too risky
    // to continue with EA/SR because we might hit the max node limit.
    if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
      if (_compile->do_reduce_allocation_merges()) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
      } else if (_invocation > 0) {
        _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
      } else {
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
      }
      return;
    }

    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);

  // Phase 4:  Update the inputs of non-instance memory Phis and
  //           the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
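  // In the running example above, this is the step that rewires the inputs of
  // node 80 from (75, 50, 60) to (75, 40, 60), while the new instance Phi 120
  // keeps the memory values computed in phase 2.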
5303 for (int j = 0; j < orig_phis.length(); j++) { 5304 PhiNode *phi = orig_phis.at(j); 5305 int alias_idx = _compile->get_alias_index(phi->adr_type()); 5306 igvn->hash_delete(phi); 5307 for (uint i = 1; i < phi->req(); i++) { 5308 Node *mem = phi->in(i); 5309 Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); 5310 if (_compile->failing()) { 5311 return; 5312 } 5313 if (mem != new_mem) { 5314 phi->set_req(i, new_mem); 5315 } 5316 } 5317 igvn->hash_insert(phi); 5318 record_for_optimizer(phi); 5319 } 5320 5321 // Update the memory inputs of MemNodes with the value we computed 5322 // in Phase 2 and move stores memory users to corresponding memory slices. 5323 // Disable memory split verification code until the fix for 6984348. 5324 // Currently it produces false negative results since it does not cover all cases. 5325 #if 0 // ifdef ASSERT 5326 visited.Reset(); 5327 Node_Stack old_mems(arena, _compile->unique() >> 2); 5328 #endif 5329 for (uint i = 0; i < ideal_nodes.size(); i++) { 5330 Node* n = ideal_nodes.at(i); 5331 Node* nmem = get_map(n->_idx); 5332 assert(nmem != nullptr, "sanity"); 5333 if (n->is_Mem()) { 5334 #if 0 // ifdef ASSERT 5335 Node* old_mem = n->in(MemNode::Memory); 5336 if (!visited.test_set(old_mem->_idx)) { 5337 old_mems.push(old_mem, old_mem->outcnt()); 5338 } 5339 #endif 5340 assert(n->in(MemNode::Memory) != nmem, "sanity"); 5341 if (!n->is_Load()) { 5342 // Move memory users of a store first. 5343 move_inst_mem(n, orig_phis); 5344 } 5345 // Now update memory input 5346 igvn->hash_delete(n); 5347 n->set_req(MemNode::Memory, nmem); 5348 igvn->hash_insert(n); 5349 record_for_optimizer(n); 5350 } else { 5351 assert(n->is_Allocate() || n->is_CheckCastPP() || 5352 n->is_AddP() || n->is_Phi() || n->is_NarrowMemProj(), "unknown node used for set_map()"); 5353 } 5354 } 5355 #if 0 // ifdef ASSERT 5356 // Verify that memory was split correctly 5357 while (old_mems.is_nonempty()) { 5358 Node* old_mem = old_mems.node(); 5359 uint old_cnt = old_mems.index(); 5360 old_mems.pop(); 5361 assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); 5362 } 5363 #endif 5364 _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_4, 5); 5365 } 5366 5367 #ifndef PRODUCT 5368 int ConnectionGraph::_no_escape_counter = 0; 5369 int ConnectionGraph::_arg_escape_counter = 0; 5370 int ConnectionGraph::_global_escape_counter = 0; 5371 5372 static const char *node_type_names[] = { 5373 "UnknownType", 5374 "JavaObject", 5375 "LocalVar", 5376 "Field", 5377 "Arraycopy" 5378 }; 5379 5380 static const char *esc_names[] = { 5381 "UnknownEscape", 5382 "NoEscape", 5383 "ArgEscape", 5384 "GlobalEscape" 5385 }; 5386 5387 const char* PointsToNode::esc_name() const { 5388 return esc_names[(int)escape_state()]; 5389 } 5390 5391 void PointsToNode::dump_header(bool print_state, outputStream* out) const { 5392 NodeType nt = node_type(); 5393 out->print("%s(%d) ", node_type_names[(int) nt], _pidx); 5394 if (print_state) { 5395 EscapeState es = escape_state(); 5396 EscapeState fields_es = fields_escape_state(); 5397 out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); 5398 if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) { 5399 out->print("NSR "); 5400 } 5401 } 5402 } 5403 5404 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const { 5405 dump_header(print_state, out); 5406 if (is_Field()) { 5407 FieldNode* f = (FieldNode*)this; 5408 if (f->is_oop()) { 5409 out->print("oop "); 5410 } 5411 if (f->offset() > 0) { 5412 out->print("+%d ", 
f->offset()); 5413 } 5414 out->print("("); 5415 for (BaseIterator i(f); i.has_next(); i.next()) { 5416 PointsToNode* b = i.get(); 5417 out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : "")); 5418 } 5419 out->print(" )"); 5420 } 5421 out->print("["); 5422 for (EdgeIterator i(this); i.has_next(); i.next()) { 5423 PointsToNode* e = i.get(); 5424 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 5425 } 5426 out->print(" ["); 5427 for (UseIterator i(this); i.has_next(); i.next()) { 5428 PointsToNode* u = i.get(); 5429 bool is_base = false; 5430 if (PointsToNode::is_base_use(u)) { 5431 is_base = true; 5432 u = PointsToNode::get_use_node(u)->as_Field(); 5433 } 5434 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 5435 } 5436 out->print(" ]] "); 5437 if (_node == nullptr) { 5438 out->print("<null>%s", newline ? "\n" : ""); 5439 } else { 5440 _node->dump(newline ? "\n" : "", false, out); 5441 } 5442 } 5443 5444 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 5445 bool first = true; 5446 int ptnodes_length = ptnodes_worklist.length(); 5447 for (int i = 0; i < ptnodes_length; i++) { 5448 PointsToNode *ptn = ptnodes_worklist.at(i); 5449 if (ptn == nullptr || !ptn->is_JavaObject()) { 5450 continue; 5451 } 5452 PointsToNode::EscapeState es = ptn->escape_state(); 5453 if ((es != PointsToNode::NoEscape) && !Verbose) { 5454 continue; 5455 } 5456 Node* n = ptn->ideal_node(); 5457 if (n->is_Allocate() || (n->is_CallStaticJava() && 5458 n->as_CallStaticJava()->is_boxing_method())) { 5459 if (first) { 5460 tty->cr(); 5461 tty->print("======== Connection graph for "); 5462 _compile->method()->print_short_name(); 5463 tty->cr(); 5464 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 5465 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 5466 tty->cr(); 5467 first = false; 5468 } 5469 ptn->dump(); 5470 // Print all locals and fields which reference this allocation 5471 for (UseIterator j(ptn); j.has_next(); j.next()) { 5472 PointsToNode* use = j.get(); 5473 if (use->is_LocalVar()) { 5474 use->dump(Verbose); 5475 } else if (Verbose) { 5476 use->dump(); 5477 } 5478 } 5479 tty->cr(); 5480 } 5481 } 5482 } 5483 5484 void ConnectionGraph::print_statistics() { 5485 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", AtomicAccess::load(&_no_escape_counter), AtomicAccess::load(&_arg_escape_counter), AtomicAccess::load(&_global_escape_counter)); 5486 } 5487 5488 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 5489 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 5490 return; 5491 } 5492 for (int next = 0; next < java_objects_worklist.length(); ++next) { 5493 JavaObjectNode* ptn = java_objects_worklist.at(next); 5494 if (ptn->ideal_node()->is_Allocate()) { 5495 if (ptn->escape_state() == PointsToNode::NoEscape) { 5496 AtomicAccess::inc(&ConnectionGraph::_no_escape_counter); 5497 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 5498 AtomicAccess::inc(&ConnectionGraph::_arg_escape_counter); 5499 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 5500 AtomicAccess::inc(&ConnectionGraph::_global_escape_counter); 5501 } else { 5502 assert(false, "Unexpected Escape State"); 5503 } 5504 } 5505 } 5506 } 5507 5508 void 
ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 5509 if (_compile->directive()->TraceEscapeAnalysisOption) { 5510 assert(ptn != nullptr, "should not be null"); 5511 assert(reason != nullptr, "should not be null"); 5512 ptn->dump_header(true); 5513 PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es; 5514 PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state(); 5515 tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason); 5516 } 5517 } 5518 5519 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const { 5520 if (_compile->directive()->TraceEscapeAnalysisOption) { 5521 stringStream ss; 5522 ss.print("propagated from: "); 5523 from->dump(true, &ss, false); 5524 return ss.as_string(); 5525 } else { 5526 return nullptr; 5527 } 5528 } 5529 5530 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const { 5531 if (_compile->directive()->TraceEscapeAnalysisOption) { 5532 stringStream ss; 5533 ss.print("escapes as arg to:"); 5534 call->dump("", false, &ss); 5535 return ss.as_string(); 5536 } else { 5537 return nullptr; 5538 } 5539 } 5540 5541 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const { 5542 if (_compile->directive()->TraceEscapeAnalysisOption) { 5543 stringStream ss; 5544 ss.print("is merged with other object: "); 5545 other->dump_header(true, &ss); 5546 return ss.as_string(); 5547 } else { 5548 return nullptr; 5549 } 5550 } 5551 5552 #endif 5553 5554 void ConnectionGraph::record_for_optimizer(Node *n) { 5555 _igvn->_worklist.push(n); 5556 _igvn->add_users_to_worklist(n); 5557 } --- EOF ---