/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
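  // The phantom object is mapped to C->top() and stands for any object the
  // analysis cannot track. It is conservatively treated as GlobalEscape and
  // is never scalar replaceable.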
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // A Load/Store at the mark word address is at offset 0 so has no AddP, which confuses EA.
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect pointer compare nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that, depending on the
        // escape status of the associated Allocate node, some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls into CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through the graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or we hit time or iteration limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObjects
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocates
  // that they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// The 'other' input must be a constant so that the Cmp can be moved around
// safely.
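// For example, a Java-level shape such as (illustrative names only):
//   Point p = cond ? new Point() : cached;
//   if (p == null) { ... }
// produces a CmpP between the merge Phi and the null constant whose single
// user is the Bool feeding the If, which is what the checks below accept.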
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an Opaque4 node between the If and Bool nodes.
          // Bail out in such a case - we need to preserve Opaque4 for correct
          // processing of predicates after loop opts.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check the comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with
  // ReduceAllocationMerges (RAM) disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the NULL constant, otherwise it will be against the constant input of
// the existing CmpP/N.
// It's guaranteed that there will be a CmpP/N in the latter case because we
// have constraints on it and because the CastPP has a control input.
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against NULL.
//
// Before:
//
//           C1     C2   ...  Cn
//            \     |        /
//             \    |       /
//              \   |      /
//               \  |     /
//                \ |    /
//                 \|   /
//               Region       B1      B2   ...  Bn
//                 |            \     |        /
//                 |             \    |       /
//                 |              \   |      /
//                 |               \  |     /
//                 |                \ |    /
//                 |                 \|   /
//                 ----------------->  Phi
//                                      |
//                              X       |
//                              |       |
//                              |       |
//                              ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                                          C2
//                                          |
//                                          If
//                                         /  \
//                                        /    \
//                                       T      F
//                                      / \    /
//                                     /   \  /
//                                    /     \/
//                          C1    CastPP   Reg    Cn
//                           |       |      |      |
//                           |       |      |      |
//                           |       |      |      |
//                           -------------- | ----------
//                                           |
//                                         Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor = current_control->unique_ctrl_out();
  Node* cmp = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *> &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the NULL constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the NULL constant) then we don't
// need to (and shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
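// For instance, a Java-level shape such as (illustrative names only):
//   Point p = cond ? new Point() : externalPoint;
//   int x = p.x;
// creates the "Before" graph below: the Phi merges the Allocate with another
// oop and the field access goes through CastPP -> AddP -> Load.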
//
// Before (CastPP control is same as Phi):
//
//          Region   Allocate   Null    Call
//            |          \        |      /
//            |           \       |     /
//            |            \      |    /
//            |             \     |   /
//            |              \    |  /
//            |               \   | /
//            ------------------> Phi      # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                       Call  NULL
//                         \    /
//                          CmpP
//                           |
//                         Bool#NE
//                           |
//                           If
//                          / \
//                         T   F
//                        / \ /
//                       /   R
//                   CastPP  |
//                      |    |
//                    AddP   |
//                      |    |
//                    Load   |
//                       \   |   0
//            Allocate    \  |  /
//                \        \ | /
//               AddP       Phi
//                  \       /
//                 Load    /
//                    \ 0 /
//                     \ |/
//                      \|
//                      Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting the
  // CastPP->Loads through ophi. Three kinds of values may be stored in this
  // array, depending on the nullability status of the corresponding input in
  // ophi.
  //
  //  - nullptr:    Meaning that the base is actually the NULL constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not Null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already nullptr
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
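// E.g., 'phi(in1, in2) == someConstant', where in practice the constant is
// most often the null pointer.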
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2   ...  inN
//              \      |        /
//               \     |       /
//                \    |      /
//                 \   |     /
//                  \  |    /
//                   \ |   /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                   CmpP/N
//
// After:
//
//        in1  Other   in2  Other  ...  inN  Other
//         |    |       |    |           |    |
//         \    |       |    |           |    |
//          \  /        |   /            |   /
//         CmpP/N      CmpP/N           CmpP/N
//          Bool        Bool             Bool
//            \          |               /
//             \         |              /
//              \        |             /
//               \       |            /
//                \      |           /
//                 \     |          /
//                  \    |         /
//                   \   |        /
//                       Phi
//                        |
//                        |   Zero
//                        |    /
//                        |   /
//                        |  /
//                      CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = _igvn->transform(PhiNode::make(ophi->in(0), zero, TypeInt::INT));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      res_phi_input = _igvn->makecon(tcmp);
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  Node* new_cmp = _igvn->transform(new CmpINode(res_phi, zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *> &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
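      // ('unique_java_object' returns nullptr when the base does not point
      // to a single known object.)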
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *> &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//  -> a '-1' constant, the i'th input of the original Phi is NSR.
//  -> a 'x' constant >= 0, the i'th input of the original Phi will be SR and
//     the info about the scalarized object will be at index x of
//     ObjectMergeValue::possible_objects
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
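// E.g., an AddP whose base is a reducible Phi, either directly or through a
// CastPP/CheckCastPP, makes this method return true.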
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of the Phi use the same debug information (regarding the Phi).
// Therefore, we collect all such safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It will then iterate on the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check the detailed description in the SafePointScalarMergeNode class header.
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms = sfpt->jvms();
    uint merge_idx = (sfpt->req() - jvms->scloff());
    int debug_start = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      Unique_Node_List value_worklist;
#ifdef ASSERT
      const Type* res_type = alloc->result_cast()->bottom_type();
      if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
        PhiNode* phi = ophi->as_Phi();
        assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
      }
#endif
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);

      // Scalarize inline types that were added to the safepoint.
      // Don't allow linking a constant oop (if available) for flat array elements
      // because Deoptimization::reassign_flat_array_elements needs field values.
      const bool allow_oop = !merge_t->is_flat();
      for (uint j = 0; j < value_worklist.size(); ++j) {
        InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
        vt->make_scalar_in_safepoints(_igvn, allow_oop);
      }
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below makes
    // sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
    _igvn->_worklist.push(sfpt);
  }

  return true;
}

void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *> &alloc_worklist, GrowableArray<Node *> &memnode_worklist) {
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  _igvn->hash_delete(ophi);

  // Copying all users first because some will be removed and others won't.
  // Ophi also may acquire some new users as part of Cast reduction.
  // CastPPs also need to be processed before CmpPs.
  Unique_Node_List castpps;
  Unique_Node_List others;
  for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
    Node* use = ophi->fast_out(i);

    if (use->is_CastPP()) {
      castpps.push(use);
    } else if (use->is_AddP() || use->is_Cmp()) {
      others.push(use);
    } else if (use->is_SafePoint()) {
      // processed later
    } else {
      assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
    }
  }

  // CastPPs need to be processed before Cmps because during the process of
  // splitting CastPPs we make reference to the inputs of the Cmp that is used
  // by the If controlling the CastPP.
  for (uint i = 0; i < castpps.size(); i++) {
    reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
  }

  for (uint i = 0; i < others.size(); i++) {
    Node* use = others.at(i);

    if (use->is_AddP()) {
      reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
      reduce_phi_on_cmp(use);
    }
  }

  _igvn->set_delay_transform(delay);
}

void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
  Node* null_ptr = _igvn->makecon(TypePtr::NULL_PTR);
  const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
  const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
  Node* new_phi = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      new_phi->set_req(i, null_ptr);
    } else {
      new_phi->set_req(i, ophi->in(i));
    }
  }

  for (int i = ophi->outcnt()-1; i >= 0;) {
    Node* out = ophi->raw_out(i);

    if (out->is_ConstraintCast()) {
      const Type* out_t = _igvn->type(out)->make_ptr();
      const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
      bool change = out_new_t != out_t;

      for (int j = out->outcnt()-1; change && j >= 0; --j) {
        Node* out2 = out->raw_out(j);
        if (!out2->is_SafePoint()) {
          change = false;
          break;
        }
      }

      if (change) {
        Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
        _igvn->replace_node(out, new_cast);
        _igvn->register_new_node_with_optimizer(new_cast);
      }
    }

    --i;
    i = MIN2(i, (int)ophi->outcnt()-1);
  }

  _igvn->replace_node(ophi, new_phi);
}

void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
  if (!C->do_reduce_allocation_merges()) return;

  Unique_Node_List ideal_nodes;
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  ideal_nodes.push(root);

  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);

    if (n->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();

      // Validate inputs of merge
      for (uint i = 1; i < merge->req(); i++) {
        if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
          assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }

      // Validate users of merge
      for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
        Node* sfpt = merge->fast_out(i);
        if (sfpt->is_SafePoint()) {
          int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());

          if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
            assert(false, "SafePointScalarMerge nodes can't be nested.");
            C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
          }
        } else {
          assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
          C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        }
      }
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);
      ideal_nodes.push(m);
    }
  }
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != nullptr && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != nullptr) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != nullptr, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain_sig();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != nullptr) {
          return true;
        }
      }
    }
  }
  return false;
}


// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != nullptr) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == nullptr ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != nullptr) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
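      // Note: calls returning an inline type as multiple fields have no
      // single oop projection at TypeFunc::Parms, so the branch below scans
      // all projections for a pointer result instead.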
1559 if ((n->as_Call()->returns_pointer() &&
1560 n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1561 (n->is_CallStaticJava() &&
1562 n->as_CallStaticJava()->is_boxing_method())) {
1563 add_call_node(n->as_Call());
1564 } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1565 bool returns_oop = false;
1566 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1567 ProjNode* pn = n->fast_out(i)->as_Proj();
1568 if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1569 returns_oop = true;
1570 }
1571 }
1572 if (returns_oop) {
1573 add_call_node(n->as_Call());
1574 }
1575 }
1576 }
1577 return;
1578 }
1579 // Put this check here to process call arguments since some call nodes
1580 // point to phantom_obj.
1581 if (n_ptn == phantom_obj || n_ptn == null_obj) {
1582 return; // Skip predefined nodes.
1583 }
1584 switch (opcode) {
1585 case Op_AddP: {
1586 Node* base = get_addp_base(n);
1587 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1588 // Field nodes are created for all field types. They are used in
1589 // adjust_scalar_replaceable_state() and split_unique_types().
1590 // Note, non-oop fields will have only base edges in Connection
1591 // Graph because such fields are not used for oop loads and stores.
1592 int offset = address_offset(n, igvn);
1593 add_field(n, PointsToNode::NoEscape, offset);
1594 if (ptn_base == nullptr) {
1595 delayed_worklist->push(n); // Process it later.
1596 } else {
1597 n_ptn = ptnode_adr(n_idx);
1598 add_base(n_ptn->as_Field(), ptn_base);
1599 }
1600 break;
1601 }
1602 case Op_CastX2P: {
1603 map_ideal_node(n, phantom_obj);
1604 break;
1605 }
1606 case Op_InlineType:
1607 case Op_CastPP:
1608 case Op_CheckCastPP:
1609 case Op_EncodeP:
1610 case Op_DecodeN:
1611 case Op_EncodePKlass:
1612 case Op_DecodeNKlass: {
1613 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1614 break;
1615 }
1616 case Op_CMoveP: {
1617 add_local_var(n, PointsToNode::NoEscape);
1618 // Do not add edges during the first iteration because some nodes
1619 // may not be defined yet.
1620 delayed_worklist->push(n);
1621 break;
1622 }
1623 case Op_ConP:
1624 case Op_ConN:
1625 case Op_ConNKlass: {
1626 // assume all oop constants globally escape except for null
1627 PointsToNode::EscapeState es;
1628 const Type* t = igvn->type(n);
1629 if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1630 es = PointsToNode::NoEscape;
1631 } else {
1632 es = PointsToNode::GlobalEscape;
1633 }
1634 PointsToNode* ptn_con = add_java_object(n, es);
1635 set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1636 break;
1637 }
1638 case Op_CreateEx: {
1639 // assume that all exception objects globally escape
1640 map_ideal_node(n, phantom_obj);
1641 break;
1642 }
1643 case Op_LoadKlass:
1644 case Op_LoadNKlass: {
1645 // Unknown class is loaded
1646 map_ideal_node(n, phantom_obj);
1647 break;
1648 }
1649 case Op_LoadP:
1650 case Op_LoadN: {
1651 add_objload_to_connection_graph(n, delayed_worklist);
1652 break;
1653 }
1654 case Op_Parm: {
1655 map_ideal_node(n, phantom_obj);
1656 break;
1657 }
1658 case Op_PartialSubtypeCheck: {
1659 // Produces Null or notNull and is used only in CmpP so
1660 // phantom_obj could be used.
1661 map_ideal_node(n, phantom_obj); // Result is unknown
1662 break;
1663 }
1664 case Op_Phi: {
1665 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1666 // ThreadLocal has RawPtr type.
1667 const Type* t = n->as_Phi()->type();
1668 if (t->make_ptr() != nullptr) {
1669 add_local_var(n, PointsToNode::NoEscape);
1670 // Do not add edges during the first iteration because some nodes
1671 // may not be defined yet.
1672 delayed_worklist->push(n);
1673 }
1674 break;
1675 }
1676 case Op_Proj: {
1677 // we are only interested in the oop result projection from a call
1678 if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1679 (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1680 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1681 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1682 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1683 }
1684 break;
1685 }
1686 case Op_Rethrow: // Exception object escapes
1687 case Op_Return: {
1688 if (n->req() > TypeFunc::Parms &&
1689 igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1690 // Treat Return value as LocalVar with GlobalEscape escape state.
1691 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1692 }
1693 break;
1694 }
1695 case Op_CompareAndExchangeP:
1696 case Op_CompareAndExchangeN:
1697 case Op_GetAndSetP:
1698 case Op_GetAndSetN: {
1699 add_objload_to_connection_graph(n, delayed_worklist);
1700 // fall-through
1701 }
1702 case Op_StoreP:
1703 case Op_StoreN:
1704 case Op_StoreNKlass:
1705 case Op_WeakCompareAndSwapP:
1706 case Op_WeakCompareAndSwapN:
1707 case Op_CompareAndSwapP:
1708 case Op_CompareAndSwapN: {
1709 add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1710 break;
1711 }
1712 case Op_AryEq:
1713 case Op_CountPositives:
1714 case Op_StrComp:
1715 case Op_StrEquals:
1716 case Op_StrIndexOf:
1717 case Op_StrIndexOfChar:
1718 case Op_StrInflatedCopy:
1719 case Op_StrCompressedCopy:
1720 case Op_VectorizedHashCode:
1721 case Op_EncodeISOArray: {
1722 add_local_var(n, PointsToNode::ArgEscape);
1723 delayed_worklist->push(n); // Process it later.
1724 break;
1725 }
1726 case Op_ThreadLocal: {
1727 PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1728 set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "ThreadLocal pointer"));
1729 break;
1730 }
1731 case Op_Blackhole: {
1732 // All blackhole pointer arguments are globally escaping.
1733 // Only do this if there is at least one pointer argument.
1734 // Do not add edges during the first iteration because some nodes
1735 // may not be defined yet; defer to the final step.
1736 for (uint i = 0; i < n->req(); i++) {
1737 Node* in = n->in(i);
1738 if (in != nullptr) {
1739 const Type* at = _igvn->type(in);
1740 if (!at->isa_ptr()) continue;
1741
1742 add_local_var(n, PointsToNode::GlobalEscape);
1743 delayed_worklist->push(n);
1744 break;
1745 }
1746 }
1747 break;
1748 }
1749 default:
1750 ; // Do nothing for nodes not related to EA.
1751 }
1752 return;
1753 }
1754
1755 // Add final simple edges to graph.
1756 void ConnectionGraph::add_final_edges(Node *n) {
1757 PointsToNode* n_ptn = ptnode_adr(n->_idx);
1758 #ifdef ASSERT
1759 if (_verify && n_ptn->is_JavaObject())
1760 return; // This method does not change graph for JavaObject.
1761 #endif
1762
1763 if (n->is_Call()) {
1764 process_call_arguments(n->as_Call());
1765 return;
1766 }
1767 assert(n->is_Store() || n->is_LoadStore() ||
1768 ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1769 "node should be registered already");
1770 int opcode = n->Opcode();
1771 bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1772 if (gc_handled) {
1773 return; // Ignore node if already handled by GC.
1774 }
1775 switch (opcode) {
1776 case Op_AddP: {
1777 Node* base = get_addp_base(n);
1778 PointsToNode* ptn_base = ptnode_adr(base->_idx);
1779 assert(ptn_base != nullptr, "field's base should be registered");
1780 add_base(n_ptn->as_Field(), ptn_base);
1781 break;
1782 }
1783 case Op_InlineType:
1784 case Op_CastPP:
1785 case Op_CheckCastPP:
1786 case Op_EncodeP:
1787 case Op_DecodeN:
1788 case Op_EncodePKlass:
1789 case Op_DecodeNKlass: {
1790 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1791 break;
1792 }
1793 case Op_CMoveP: {
1794 for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1795 Node* in = n->in(i);
1796 if (in == nullptr) {
1797 continue; // ignore null
1798 }
1799 Node* uncast_in = in->uncast();
1800 if (uncast_in->is_top() || uncast_in == n) {
1801 continue; // ignore top or inputs which go back to this node
1802 }
1803 PointsToNode* ptn = ptnode_adr(in->_idx);
1804 assert(ptn != nullptr, "node should be registered");
1805 add_edge(n_ptn, ptn);
1806 }
1807 break;
1808 }
1809 case Op_LoadP:
1810 case Op_LoadN: {
1811 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1812 // ThreadLocal has RawPtr type.
1813 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1814 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1815 break;
1816 }
1817 case Op_Phi: {
1818 // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1819 // ThreadLocal has RawPtr type.
1820 assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1821 for (uint i = 1; i < n->req(); i++) {
1822 Node* in = n->in(i);
1823 if (in == nullptr) {
1824 continue; // ignore null
1825 }
1826 Node* uncast_in = in->uncast();
1827 if (uncast_in->is_top() || uncast_in == n) {
1828 continue; // ignore top or inputs which go back to this node
1829 }
1830 PointsToNode* ptn = ptnode_adr(in->_idx);
1831 assert(ptn != nullptr, "node should be registered");
1832 add_edge(n_ptn, ptn);
1833 }
1834 break;
1835 }
1836 case Op_Proj: {
1837 // we are only interested in the oop result projection from a call
1838 assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1839 n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1840 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1841 break;
1842 }
1843 case Op_Rethrow: // Exception object escapes
1844 case Op_Return: {
1845 assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1846 "Unexpected node type");
1847 // Treat Return value as LocalVar with GlobalEscape escape state.
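// For illustration, a Java-level sketch:
//
//   Object m() {
//     Object o = new Object();
//     return o;    // 'o' becomes reachable by the caller => GlobalEscape
//   }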
1848 add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1849 break;
1850 }
1851 case Op_CompareAndExchangeP:
1852 case Op_CompareAndExchangeN:
1853 case Op_GetAndSetP:
1854 case Op_GetAndSetN:{
1855 assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1856 add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1857 // fall-through
1858 }
1859 case Op_CompareAndSwapP:
1860 case Op_CompareAndSwapN:
1861 case Op_WeakCompareAndSwapP:
1862 case Op_WeakCompareAndSwapN:
1863 case Op_StoreP:
1864 case Op_StoreN:
1865 case Op_StoreNKlass:{
1866 add_final_edges_unsafe_access(n, opcode);
1867 break;
1868 }
1869 case Op_VectorizedHashCode:
1870 case Op_AryEq:
1871 case Op_CountPositives:
1872 case Op_StrComp:
1873 case Op_StrEquals:
1874 case Op_StrIndexOf:
1875 case Op_StrIndexOfChar:
1876 case Op_StrInflatedCopy:
1877 case Op_StrCompressedCopy:
1878 case Op_EncodeISOArray: {
1879 // char[]/byte[] arrays passed to string intrinsics do not escape but
1880 // they are not scalar replaceable. Adjust escape state for them.
1881 // Start from in(2) edge since in(1) is memory edge.
1882 for (uint i = 2; i < n->req(); i++) {
1883 Node* adr = n->in(i);
1884 const Type* at = _igvn->type(adr);
1885 if (!adr->is_top() && at->isa_ptr()) {
1886 assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1887 at->isa_ptr() != nullptr, "expecting a pointer");
1888 if (adr->is_AddP()) {
1889 adr = get_addp_base(adr);
1890 }
1891 PointsToNode* ptn = ptnode_adr(adr->_idx);
1892 assert(ptn != nullptr, "node should be registered");
1893 add_edge(n_ptn, ptn);
1894 }
1895 }
1896 break;
1897 }
1898 case Op_Blackhole: {
1899 // All blackhole pointer arguments are globally escaping.
1900 for (uint i = 0; i < n->req(); i++) {
1901 Node* in = n->in(i);
1902 if (in != nullptr) {
1903 const Type* at = _igvn->type(in);
1904 if (!at->isa_ptr()) continue;
1905
1906 if (in->is_AddP()) {
1907 in = get_addp_base(in);
1908 }
1909
1910 PointsToNode* ptn = ptnode_adr(in->_idx);
1911 assert(ptn != nullptr, "should be defined already");
1912 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
1913 add_edge(n_ptn, ptn);
1914 }
1915 }
1916 break;
1917 }
1918 default: {
1919 // This method should be called only for EA-specific nodes which may
1920 // miss some edges when they were created.
1921 #ifdef ASSERT
1922 n->dump(1);
1923 #endif
1924 guarantee(false, "unknown node");
1925 }
1926 }
1927 return;
1928 }
1929
1930 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
1931 Node* adr = n->in(MemNode::Address);
1932 const Type* adr_type = _igvn->type(adr);
1933 adr_type = adr_type->make_ptr();
1934 if (adr_type == nullptr) {
1935 return; // skip dead nodes
1936 }
1937 if (adr_type->isa_oopptr()
1938 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1939 && adr_type == TypeRawPtr::NOTNULL
1940 && is_captured_store_address(adr))) {
1941 delayed_worklist->push(n); // Process it later.
1942 #ifdef ASSERT
1943 assert(adr->is_AddP(), "expecting an AddP");
1944 if (adr_type == TypeRawPtr::NOTNULL) {
1945 // Verify a raw address for a store captured by Initialize node.
1946 int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1947 assert(offs != Type::OffsetBot, "offset must be a constant");
1948 }
1949 #endif
1950 } else {
1951 // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
1952 if (adr->is_BoxLock()) { 1953 return; 1954 } 1955 // Stored value escapes in unsafe access. 1956 if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1957 delayed_worklist->push(n); // Process unsafe access later. 1958 return; 1959 } 1960 #ifdef ASSERT 1961 n->dump(1); 1962 assert(false, "not unsafe"); 1963 #endif 1964 } 1965 } 1966 1967 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { 1968 Node* adr = n->in(MemNode::Address); 1969 const Type *adr_type = _igvn->type(adr); 1970 adr_type = adr_type->make_ptr(); 1971 #ifdef ASSERT 1972 if (adr_type == nullptr) { 1973 n->dump(1); 1974 assert(adr_type != nullptr, "dead node should not be on list"); 1975 return true; 1976 } 1977 #endif 1978 1979 if (adr_type->isa_oopptr() 1980 || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass) 1981 && adr_type == TypeRawPtr::NOTNULL 1982 && is_captured_store_address(adr))) { 1983 // Point Address to Value 1984 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 1985 assert(adr_ptn != nullptr && 1986 adr_ptn->as_Field()->is_oop(), "node should be registered"); 1987 Node* val = n->in(MemNode::ValueIn); 1988 PointsToNode* ptn = ptnode_adr(val->_idx); 1989 assert(ptn != nullptr, "node should be registered"); 1990 add_edge(adr_ptn, ptn); 1991 return true; 1992 } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { 1993 // Stored value escapes in unsafe access. 1994 Node* val = n->in(MemNode::ValueIn); 1995 PointsToNode* ptn = ptnode_adr(val->_idx); 1996 assert(ptn != nullptr, "node should be registered"); 1997 set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address")); 1998 // Add edge to object for unsafe access with offset. 1999 PointsToNode* adr_ptn = ptnode_adr(adr->_idx); 2000 assert(adr_ptn != nullptr, "node should be registered"); 2001 if (adr_ptn->is_Field()) { 2002 assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); 2003 add_edge(adr_ptn, ptn); 2004 } 2005 return true; 2006 } 2007 #ifdef ASSERT 2008 n->dump(1); 2009 assert(false, "not unsafe"); 2010 #endif 2011 return false; 2012 } 2013 2014 void ConnectionGraph::add_call_node(CallNode* call) { 2015 assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer"); 2016 uint call_idx = call->_idx; 2017 if (call->is_Allocate()) { 2018 Node* k = call->in(AllocateNode::KlassNode); 2019 const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); 2020 assert(kt != nullptr, "TypeKlassPtr required."); 2021 PointsToNode::EscapeState es = PointsToNode::NoEscape; 2022 bool scalar_replaceable = true; 2023 NOT_PRODUCT(const char* nsr_reason = ""); 2024 if (call->is_AllocateArray()) { 2025 if (!kt->isa_aryklassptr()) { // StressReflectiveCode 2026 es = PointsToNode::GlobalEscape; 2027 } else { 2028 int length = call->in(AllocateNode::ALength)->find_int_con(-1); 2029 if (length < 0) { 2030 // Not scalar replaceable if the length is not constant. 2031 scalar_replaceable = false; 2032 NOT_PRODUCT(nsr_reason = "has a non-constant length"); 2033 } else if (length > EliminateAllocationArraySizeLimit) { 2034 // Not scalar replaceable if the length is too big. 2035 scalar_replaceable = false; 2036 NOT_PRODUCT(nsr_reason = "has a length that is too big"); 2037 } 2038 } 2039 } else { // Allocate instance 2040 if (!kt->isa_instklassptr()) { // StressReflectiveCode 2041 es = PointsToNode::GlobalEscape; 2042 } else { 2043 const TypeInstKlassPtr* ikt = kt->is_instklassptr(); 2044 ciInstanceKlass* ik = ikt->klass_is_exact() ? 
ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2045 if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2046 ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2047 !ik->can_be_instantiated() ||
2048 ik->has_finalizer()) {
2049 es = PointsToNode::GlobalEscape;
2050 } else {
2051 int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2052 if (nfields > EliminateAllocationFieldsLimit) {
2053 // Not scalar replaceable if there are too many fields.
2054 scalar_replaceable = false;
2055 NOT_PRODUCT(nsr_reason = "has too many fields");
2056 }
2057 }
2058 }
2059 }
2060 add_java_object(call, es);
2061 PointsToNode* ptn = ptnode_adr(call_idx);
2062 if (!scalar_replaceable && ptn->scalar_replaceable()) {
2063 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2064 }
2065 } else if (call->is_CallStaticJava()) {
2066 // Call nodes could be different types:
2067 //
2068 // 1. CallDynamicJavaNode (what happened during call is unknown):
2069 //
2070 // - mapped to GlobalEscape JavaObject node if oop is returned;
2071 //
2072 // - all oop arguments are escaping globally;
2073 //
2074 // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2075 //
2076 // - the same as CallDynamicJavaNode if we can't do bytecode analysis;
2077 //
2078 // - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2079 // - mapped to NoEscape JavaObject node if non-escaping object allocated
2080 // during call is returned;
2081 // - mapped to ArgEscape LocalVar node pointing to object arguments
2082 // which are returned and do not escape during the call;
2083 //
2084 // - oop arguments escaping status is defined by bytecode analysis;
2085 //
2086 // For a static call, we know exactly what method is being called.
2087 // Use bytecode estimator to record whether the call's return value escapes.
2088 ciMethod* meth = call->as_CallJava()->method();
2089 if (meth == nullptr) {
2090 const char* name = call->as_CallStaticJava()->_name;
2091 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2092 strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "TODO: add failed case check");
2093 // Returns a newly allocated non-escaped object.
2094 add_java_object(call, PointsToNode::NoEscape);
2095 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2096 } else if (meth->is_boxing_method()) {
2097 // Returns boxing object
2098 PointsToNode::EscapeState es;
2099 vmIntrinsics::ID intr = meth->intrinsic_id();
2100 if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2101 // It does not escape if object is always allocated.
2102 es = PointsToNode::NoEscape;
2103 } else {
2104 // It escapes globally if object could be loaded from cache.
2105 es = PointsToNode::GlobalEscape;
2106 }
2107 add_java_object(call, es);
2108 if (es == PointsToNode::GlobalEscape) {
2109 set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2110 }
2111 } else {
2112 BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2113 call_analyzer->copy_dependencies(_compile->dependencies());
2114 if (call_analyzer->is_return_allocated()) {
2115 // Returns a newly allocated non-escaped object, simply
2116 // update dependency information.
2117 // Mark it as NoEscape so that objects referenced by
2118 // its fields will be marked as NoEscape at least.
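// For illustration, a Java-level sketch (assuming the bytecode analyzer
// can prove the callee only returns a fresh allocation):
//
//   static Point make() { return new Point(); }   // is_return_allocated()
//
// The caller may then treat the call's result like a local allocation.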
2119 add_java_object(call, PointsToNode::NoEscape);
2120 set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2121 } else {
2122 // Determine whether any arguments are returned.
2123 const TypeTuple* d = call->tf()->domain_cc();
2124 bool ret_arg = false;
2125 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2126 if (d->field_at(i)->isa_ptr() != nullptr &&
2127 call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2128 ret_arg = true;
2129 break;
2130 }
2131 }
2132 if (ret_arg) {
2133 add_local_var(call, PointsToNode::ArgEscape);
2134 } else {
2135 // Returns unknown object.
2136 map_ideal_node(call, phantom_obj);
2137 }
2138 }
2139 }
2140 } else {
2141 // Another type of call; assume the worst case:
2142 // returned value is unknown and globally escapes.
2143 assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2144 map_ideal_node(call, phantom_obj);
2145 }
2146 }
2147
2148 void ConnectionGraph::process_call_arguments(CallNode *call) {
2149 bool is_arraycopy = false;
2150 switch (call->Opcode()) {
2151 #ifdef ASSERT
2152 case Op_Allocate:
2153 case Op_AllocateArray:
2154 case Op_Lock:
2155 case Op_Unlock:
2156 assert(false, "should be done already");
2157 break;
2158 #endif
2159 case Op_ArrayCopy:
2160 case Op_CallLeafNoFP:
2161 // Most array copies are ArrayCopy nodes at this point but there
2162 // are still a few direct calls to the copy subroutines (See
2163 // PhaseStringOpts::copy_string())
2164 is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2165 call->as_CallLeaf()->is_call_to_arraycopystub();
2166 // fall through
2167 case Op_CallLeafVector:
2168 case Op_CallLeaf: {
2169 // Stub calls, objects do not escape but they are not scalar replaceable.
2170 // Adjust escape state for outgoing arguments.
2171 const TypeTuple * d = call->tf()->domain_sig();
2172 bool src_has_oops = false;
2173 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2174 const Type* at = d->field_at(i);
2175 Node *arg = call->in(i);
2176 if (arg == nullptr) {
2177 continue;
2178 }
2179 const Type *aat = _igvn->type(arg);
2180 if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2181 continue;
2182 }
2183 if (arg->is_AddP()) {
2184 //
2185 // The inline_native_clone() case when the arraycopy stub is called
2186 // after the allocation, before the Initialize and CheckCastPP nodes.
2187 // Or the normal arraycopy for object arrays case.
2188 //
2189 // Set AddP's base (Allocate) as not scalar replaceable since
2190 // pointer to the base (with offset) is passed as argument.
//
2192 arg = get_addp_base(arg);
2193 }
2194 PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2195 assert(arg_ptn != nullptr, "should be registered");
2196 PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2197 if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
2198 assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
2199 aat->isa_ptr() != nullptr, "expecting a Ptr");
2200 bool arg_has_oops = aat->isa_oopptr() &&
2201 (aat->isa_instptr() ||
2202 (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2203 (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2204 aat->isa_aryptr()->is_flat() &&
2205 aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2206 if (i == TypeFunc::Parms) {
2207 src_has_oops = arg_has_oops;
2208 }
2209 //
2210 // src or dst could be j.l.Object when the other is a basic type array:
2211 //
2212 // arraycopy(char[],0,Object*,0,size);
2213 // arraycopy(Object*,0,char[],0,size);
2214 //
2215 // Don't add edges in such cases.
2216 //
2217 bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2218 arg_has_oops && (i > TypeFunc::Parms);
2219 #ifdef ASSERT
2220 if (!(is_arraycopy ||
2221 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2222 (call->as_CallLeaf()->_name != nullptr &&
2223 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2224 strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2225 strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2226 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2227 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2228 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2229 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2230 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2231 strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2232 strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2233 strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2234 strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2235 strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2236 strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2237 strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2238 strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2239 strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2240 strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2241 strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2242 strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2243 strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2244 strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2245 strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2246 strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2247 strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2248 strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2249 strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2250 strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2251 strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2252 strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2253 strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 || 2254 strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 || 2255 strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 || 2256 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2257 strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 || 2258 strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 || 2259 strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 || 2260 strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 || 2261 strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 || 2262 strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 || 2263 strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 || 2264 strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 || 2265 strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 || 2266 strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0) 2267 ))) { 2268 call->dump(); 2269 fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name); 2270 } 2271 #endif 2272 // Always process arraycopy's destination object since 2273 // we need to add all possible edges to references in 2274 // source object. 2275 if (arg_esc >= PointsToNode::ArgEscape && 2276 !arg_is_arraycopy_dest) { 2277 continue; 2278 } 2279 PointsToNode::EscapeState es = PointsToNode::ArgEscape; 2280 if (call->is_ArrayCopy()) { 2281 ArrayCopyNode* ac = call->as_ArrayCopy(); 2282 if (ac->is_clonebasic() || 2283 ac->is_arraycopy_validated() || 2284 ac->is_copyof_validated() || 2285 ac->is_copyofrange_validated()) { 2286 es = PointsToNode::NoEscape; 2287 } 2288 } 2289 set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call))); 2290 if (arg_is_arraycopy_dest) { 2291 Node* src = call->in(TypeFunc::Parms); 2292 if (src->is_AddP()) { 2293 src = get_addp_base(src); 2294 } 2295 PointsToNode* src_ptn = ptnode_adr(src->_idx); 2296 assert(src_ptn != nullptr, "should be registered"); 2297 if (arg_ptn != src_ptn) { 2298 // Special arraycopy edge: 2299 // A destination object's field can't have the source object 2300 // as base since objects escape states are not related. 2301 // Only escape state of destination object's fields affects 2302 // escape state of fields in source object. 2303 add_arraycopy(call, es, src_ptn, arg_ptn); 2304 } 2305 } 2306 } 2307 } 2308 break; 2309 } 2310 case Op_CallStaticJava: { 2311 // For a static call, we know exactly what method is being called. 2312 // Use bytecode estimator to record the call's escape affects 2313 #ifdef ASSERT 2314 const char* name = call->as_CallStaticJava()->_name; 2315 assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only"); 2316 #endif 2317 ciMethod* meth = call->as_CallJava()->method(); 2318 if ((meth != nullptr) && meth->is_boxing_method()) { 2319 break; // Boxing methods do not modify any oops. 2320 } 2321 BCEscapeAnalyzer* call_analyzer = (meth !=nullptr) ? meth->get_bcea() : nullptr; 2322 // fall-through if not a Java method or no analyzer information 2323 if (call_analyzer != nullptr) { 2324 PointsToNode* call_ptn = ptnode_adr(call->_idx); 2325 const TypeTuple* d = call->tf()->domain_cc(); 2326 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 2327 const Type* at = d->field_at(i); 2328 int k = i - TypeFunc::Parms; 2329 Node* arg = call->in(i); 2330 PointsToNode* arg_ptn = ptnode_adr(arg->_idx); 2331 if (at->isa_ptr() != nullptr && 2332 call_analyzer->is_arg_returned(k)) { 2333 // The call returns arguments. 
2334 if (call_ptn != nullptr) { // Is call's result used?
2335 assert(call_ptn->is_LocalVar(), "node should be registered");
2336 assert(arg_ptn != nullptr, "node should be registered");
2337 add_edge(call_ptn, arg_ptn);
2338 }
2339 }
2340 if (at->isa_oopptr() != nullptr &&
2341 arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2342 if (!call_analyzer->is_arg_stack(k)) {
2343 // The argument globally escapes
2344 set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2345 } else {
2346 set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2347 if (!call_analyzer->is_arg_local(k)) {
2348 // The argument itself doesn't escape, but any fields might
2349 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2350 }
2351 }
2352 }
2353 }
2354 if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2355 // The call returns arguments.
2356 assert(call_ptn->edge_count() > 0, "sanity");
2357 if (!call_analyzer->is_return_local()) {
2358 // The call may also return an unknown object.
2359 add_edge(call_ptn, phantom_obj);
2360 }
2361 }
2362 break;
2363 }
2364 }
2365 default: {
2366 // Fall-through here if not a Java method or no analyzer information,
2367 // or for some other type of call: assume the worst case, all arguments
2368 // globally escape.
2369 const TypeTuple* d = call->tf()->domain_cc();
2370 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2371 const Type* at = d->field_at(i);
2372 if (at->isa_oopptr() != nullptr) {
2373 Node* arg = call->in(i);
2374 if (arg->is_AddP()) {
2375 arg = get_addp_base(arg);
2376 }
2377 assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2378 set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2379 }
2380 }
2381 }
2382 }
2383 }
2384
2385
2386 // Finish Graph construction.
2387 bool ConnectionGraph::complete_connection_graph(
2388 GrowableArray<PointsToNode*>& ptnodes_worklist,
2389 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2390 GrowableArray<JavaObjectNode*>& java_objects_worklist,
2391 GrowableArray<FieldNode*>& oop_fields_worklist) {
2392 // Normally only 1-3 passes are needed to build the Connection Graph,
2393 // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
2394 // Set the limit to 20 to catch the situation when something went wrong and
2395 // bail out of Escape Analysis.
2396 // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
2397 #define GRAPH_BUILD_ITER_LIMIT 20
2398
2399 // Propagate GlobalEscape and ArgEscape escape states and check that
2400 // we still have non-escaping objects. The method pushes onto _worklist
2401 // Field nodes which reference phantom_object.
2402 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2403 return false; // Nothing to do.
2404 }
2405 // Now propagate references to all JavaObject nodes.
2406 int java_objects_length = java_objects_worklist.length();
2407 elapsedTimer build_time;
2408 build_time.start();
2409 elapsedTimer time;
2410 bool timeout = false;
2411 int new_edges = 1;
2412 int iterations = 0;
2413 do {
2414 while ((new_edges > 0) &&
2415 (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2416 double start_time = time.seconds();
2417 time.start();
2418 new_edges = 0;
2419 // Propagate references to phantom_object for nodes pushed on _worklist
2420 // by find_non_escaped_objects() and find_field_value().
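// (Sketch of the fixed-point iteration below: keep adding edges from
// JavaObject nodes to their uses until a pass adds no new edges, re-running
// escape state propagation whenever the graph grew; the SAMPLE_SIZE timing
// probe and GRAPH_BUILD_ITER_LIMIT bound the work.)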
2421 new_edges += add_java_object_edges(phantom_obj, false);
2422 for (int next = 0; next < java_objects_length; ++next) {
2423 JavaObjectNode* ptn = java_objects_worklist.at(next);
2424 new_edges += add_java_object_edges(ptn, true);
2425
2426 #define SAMPLE_SIZE 4
2427 if ((next % SAMPLE_SIZE) == 0) {
2428 // Every SAMPLE_SIZE objects, estimate how much time it will take
2429 // to complete graph construction.
2430 time.stop();
2431 // Poll for requests from shutdown mechanism to quiesce compiler
2432 // because Connection graph construction may take a long time.
2433 CompileBroker::maybe_block();
2434 double stop_time = time.seconds();
2435 double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2436 double time_until_end = time_per_iter * (double)(java_objects_length - next);
2437 if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2438 timeout = true;
2439 break; // Timeout
2440 }
2441 start_time = stop_time;
2442 time.start();
2443 }
2444 #undef SAMPLE_SIZE
2445
2446 }
2447 if (timeout) break;
2448 if (new_edges > 0) {
2449 // Update escape states on each iteration if graph was updated.
2450 if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2451 return false; // Nothing to do.
2452 }
2453 }
2454 time.stop();
2455 if (time.seconds() >= EscapeAnalysisTimeout) {
2456 timeout = true;
2457 break;
2458 }
2459 }
2460 if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2461 time.start();
2462 // Find fields which have unknown value.
2463 int fields_length = oop_fields_worklist.length();
2464 for (int next = 0; next < fields_length; next++) {
2465 FieldNode* field = oop_fields_worklist.at(next);
2466 if (field->edge_count() == 0) {
2467 new_edges += find_field_value(field);
2468 // This code may have added new edges to phantom_object.
2469 // Another cycle is needed to propagate references to phantom_object.
2470 }
2471 }
2472 time.stop();
2473 if (time.seconds() >= EscapeAnalysisTimeout) {
2474 timeout = true;
2475 break;
2476 }
2477 } else {
2478 new_edges = 0; // Bailout
2479 }
2480 } while (new_edges > 0);
2481
2482 build_time.stop();
2483 _build_time = build_time.seconds();
2484 _build_iterations = iterations;
2485
2486 // Bailout if passed limits.
2487 if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2488 Compile* C = _compile;
2489 if (C->log() != nullptr) {
2490 C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2491 C->log()->text("%s", timeout ? "time" : "iterations");
2492 C->log()->end_elem(" limit'");
2493 }
2494 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2495 _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2496 // Possible infinite build_connection_graph loop,
2497 // bailout (no changes to ideal graph were made).
2498 return false;
2499 }
2500
2501 #undef GRAPH_BUILD_ITER_LIMIT
2502
2503 // Find fields initialized by null for non-escaping Allocations.
2504 int non_escaped_length = non_escaped_allocs_worklist.length();
2505 for (int next = 0; next < non_escaped_length; next++) {
2506 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2507 PointsToNode::EscapeState es = ptn->escape_state();
2508 assert(es <= PointsToNode::ArgEscape, "sanity");
2509 if (es == PointsToNode::NoEscape) {
2510 if (find_init_values_null(ptn, _igvn) > 0) {
2511 // Adding references to null object does not change escape states
2512 // since it does not escape. Also no fields are added to null object.
2513 add_java_object_edges(null_obj, false);
2514 }
2515 }
2516 Node* n = ptn->ideal_node();
2517 if (n->is_Allocate()) {
2518 // The object allocated by this Allocate node will never be
2519 // seen by another thread. Mark it so that when it is
2520 // expanded no MemBarStoreStore is added.
2521 InitializeNode* ini = n->as_Allocate()->initialization();
2522 if (ini != nullptr)
2523 ini->set_does_not_escape();
2524 }
2525 }
2526 return true; // Finished graph construction.
2527 }
2528
2529 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2530 // and check that we still have non-escaping java objects.
2531 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2532 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2533 GrowableArray<PointsToNode*> escape_worklist;
2534 // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2535 int ptnodes_length = ptnodes_worklist.length();
2536 for (int next = 0; next < ptnodes_length; ++next) {
2537 PointsToNode* ptn = ptnodes_worklist.at(next);
2538 if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2539 ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2540 escape_worklist.push(ptn);
2541 }
2542 }
2543 // Set escape states to referenced nodes (edges list).
2544 while (escape_worklist.length() > 0) {
2545 PointsToNode* ptn = escape_worklist.pop();
2546 PointsToNode::EscapeState es = ptn->escape_state();
2547 PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2548 if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2549 es >= PointsToNode::ArgEscape) {
2550 // GlobalEscape or ArgEscape state of field means it has unknown value.
2551 if (add_edge(ptn, phantom_obj)) {
2552 // New edge was added
2553 add_field_uses_to_worklist(ptn->as_Field());
2554 }
2555 }
2556 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2557 PointsToNode* e = i.get();
2558 if (e->is_Arraycopy()) {
2559 assert(ptn->arraycopy_dst(), "sanity");
2560 // Propagate only fields escape state through arraycopy edge.
2561 if (e->fields_escape_state() < field_es) {
2562 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2563 escape_worklist.push(e);
2564 }
2565 } else if (es >= field_es) {
2566 // fields_escape_state is also set to 'es' if it is less than 'es'.
2567 if (e->escape_state() < es) {
2568 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2569 escape_worklist.push(e);
2570 }
2571 } else {
2572 // Propagate field escape state.
2573 bool es_changed = false;
2574 if (e->fields_escape_state() < field_es) {
2575 set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2576 es_changed = true;
2577 }
2578 if ((e->escape_state() < field_es) &&
2579 e->is_Field() && ptn->is_JavaObject() &&
2580 e->as_Field()->is_oop()) {
2581 // Change escape state of referenced fields.
2582 set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2583 es_changed = true;
2584 } else if (e->escape_state() < es) {
2585 set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2586 es_changed = true;
2587 }
2588 if (es_changed) {
2589 escape_worklist.push(e);
2590 }
2591 }
2592 }
2593 }
2594 // Remove escaped objects from non_escaped list.
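// For illustration, a Java-level sketch of an allocation removed here:
//
//   static Object g;
//   void m() { Object o = new Object(); g = o; }  // 'o' is GlobalEscape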
2595 for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2596 JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2597 if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2598 non_escaped_allocs_worklist.delete_at(next);
2599 }
2600 if (ptn->escape_state() == PointsToNode::NoEscape) {
2601 // Find fields in non-escaped allocations which have unknown value.
2602 find_init_values_phantom(ptn);
2603 }
2604 }
2605 return (non_escaped_allocs_worklist.length() > 0);
2606 }
2607
2608 // Add all references to JavaObject node by walking over all uses.
2609 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2610 int new_edges = 0;
2611 if (populate_worklist) {
2612 // Populate _worklist with uses of jobj's uses.
2613 for (UseIterator i(jobj); i.has_next(); i.next()) {
2614 PointsToNode* use = i.get();
2615 if (use->is_Arraycopy()) {
2616 continue;
2617 }
2618 add_uses_to_worklist(use);
2619 if (use->is_Field() && use->as_Field()->is_oop()) {
2620 // Put on worklist all field's uses (loads) and
2621 // related field nodes (same base and offset).
2622 add_field_uses_to_worklist(use->as_Field());
2623 }
2624 }
2625 }
2626 for (int l = 0; l < _worklist.length(); l++) {
2627 PointsToNode* use = _worklist.at(l);
2628 if (PointsToNode::is_base_use(use)) {
2629 // Add reference from jobj to field and from field to jobj (field's base).
2630 use = PointsToNode::get_use_node(use)->as_Field();
2631 if (add_base(use->as_Field(), jobj)) {
2632 new_edges++;
2633 }
2634 continue;
2635 }
2636 assert(!use->is_JavaObject(), "sanity");
2637 if (use->is_Arraycopy()) {
2638 if (jobj == null_obj) { // null object does not have field edges
2639 continue;
2640 }
2641 // Add an edge from the Arraycopy node to the arraycopy's source java object
2642 if (add_edge(use, jobj)) {
2643 jobj->set_arraycopy_src();
2644 new_edges++;
2645 }
2646 // and stop here.
2647 continue;
2648 }
2649 if (!add_edge(use, jobj)) {
2650 continue; // No new edge added, there was such edge already.
2651 }
2652 new_edges++;
2653 if (use->is_LocalVar()) {
2654 add_uses_to_worklist(use);
2655 if (use->arraycopy_dst()) {
2656 for (EdgeIterator i(use); i.has_next(); i.next()) {
2657 PointsToNode* e = i.get();
2658 if (e->is_Arraycopy()) {
2659 if (jobj == null_obj) { // null object does not have field edges
2660 continue;
2661 }
2662 // Add edge from arraycopy's destination java object to Arraycopy node.
2663 if (add_edge(jobj, e)) {
2664 new_edges++;
2665 jobj->set_arraycopy_dst();
2666 }
2667 }
2668 }
2669 }
2670 } else {
2671 // A new edge was added to a Field node, i.e. 'jobj' is stored into the field.
2672 // Put on worklist all field's uses (loads) and
2673 // related field nodes (same base and offset).
2674 add_field_uses_to_worklist(use->as_Field());
2675 }
2676 }
2677 _worklist.clear();
2678 _in_worklist.reset();
2679 return new_edges;
2680 }
2681
2682 // Put on worklist all related field nodes.
2683 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2684 assert(field->is_oop(), "sanity");
2685 int offset = field->offset();
2686 add_uses_to_worklist(field);
2687 // Loop over all bases of this field and push on worklist Field nodes
2688 // with the same offset and base (since they may reference the same field).
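// (Illustrative note: two distinct AddP nodes with the same base and the
// same offset model the same Java field, e.g. two separate occurrences of
// 'p.f' in the method, so a value recorded through one must become visible
// to loads through the other.)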
2689 for (BaseIterator i(field); i.has_next(); i.next()) {
2690 PointsToNode* base = i.get();
2691 add_fields_to_worklist(field, base);
2692 // Check if the base was the source object of an arraycopy and go over the
2693 // arraycopy's destination objects since values stored to a field of the source
2694 // object are accessible by uses (loads) of fields of the destination objects.
2695 if (base->arraycopy_src()) {
2696 for (UseIterator j(base); j.has_next(); j.next()) {
2697 PointsToNode* arycp = j.get();
2698 if (arycp->is_Arraycopy()) {
2699 for (UseIterator k(arycp); k.has_next(); k.next()) {
2700 PointsToNode* abase = k.get();
2701 if (abase->arraycopy_dst() && abase != base) {
2702 // Look for the same arraycopy reference.
2703 add_fields_to_worklist(field, abase);
2704 }
2705 }
2706 }
2707 }
2708 }
2709 }
2710 }
2711
2712 // Put on worklist all related field nodes.
2713 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2714 int offset = field->offset();
2715 if (base->is_LocalVar()) {
2716 for (UseIterator j(base); j.has_next(); j.next()) {
2717 PointsToNode* f = j.get();
2718 if (PointsToNode::is_base_use(f)) { // Field
2719 f = PointsToNode::get_use_node(f);
2720 if (f == field || !f->as_Field()->is_oop()) {
2721 continue;
2722 }
2723 int offs = f->as_Field()->offset();
2724 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2725 add_to_worklist(f);
2726 }
2727 }
2728 }
2729 } else {
2730 assert(base->is_JavaObject(), "sanity");
2731 if (// Skip phantom_object since it is only used to indicate that
2732 // this field's content globally escapes.
2733 (base != phantom_obj) &&
2734 // null object node does not have fields.
2735 (base != null_obj)) {
2736 for (EdgeIterator i(base); i.has_next(); i.next()) {
2737 PointsToNode* f = i.get();
2738 // Skip arraycopy edge since store to destination object field
2739 // does not update value in source object field.
2740 if (f->is_Arraycopy()) {
2741 assert(base->arraycopy_dst(), "sanity");
2742 continue;
2743 }
2744 if (f == field || !f->as_Field()->is_oop()) {
2745 continue;
2746 }
2747 int offs = f->as_Field()->offset();
2748 if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2749 add_to_worklist(f);
2750 }
2751 }
2752 }
2753 }
2754 }
2755
2756 // Find fields which have unknown value.
2757 int ConnectionGraph::find_field_value(FieldNode* field) {
2758 // Escaped fields should already have an init value.
2759 assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2760 int new_edges = 0;
2761 for (BaseIterator i(field); i.has_next(); i.next()) {
2762 PointsToNode* base = i.get();
2763 if (base->is_JavaObject()) {
2764 // Skip Allocate's fields which will be processed later.
2765 if (base->ideal_node()->is_Allocate()) {
2766 return 0;
2767 }
2768 assert(base == null_obj, "only null ptr base expected here");
2769 }
2770 }
2771 if (add_edge(field, phantom_obj)) {
2772 // New edge was added
2773 new_edges++;
2774 add_field_uses_to_worklist(field);
2775 }
2776 return new_edges;
2777 }
2778
2779 // Find fields initializing values for allocations.
2780 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2781 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2782 PointsToNode* init_val = phantom_obj;
2783 Node* alloc = pta->ideal_node();
2784
2785 // Do nothing for Allocate nodes since their field values are
2786 // "known" unless they are initialized by arraycopy/clone.
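// For illustration, a Java-level sketch of the arraycopy/clone exception:
//
//   Point[] a = ...;
//   Point[] b = a.clone();   // 'b' is an arraycopy destination: its fields
//                            // receive unknown values from 'a'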
2787 if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2788 if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2789 // Non-flat inline type arrays are initialized with
2790 // the default value instead of null. Handle them here.
2791 init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2792 assert(init_val != nullptr, "default value should be registered");
2793 } else {
2794 return 0;
2795 }
2796 }
2797 // Non-escaped allocation returned from Java or runtime call has unknown values in fields.
2798 assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2799 #ifdef ASSERT
2800 if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2801 const char* name = alloc->as_CallStaticJava()->_name;
2802 assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2803 strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "sanity");
2804 }
2805 #endif
2807 int new_edges = 0;
2808 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2809 PointsToNode* field = i.get();
2810 if (field->is_Field() && field->as_Field()->is_oop()) {
2811 if (add_edge(field, init_val)) {
2812 // New edge was added
2813 new_edges++;
2814 add_field_uses_to_worklist(field->as_Field());
2815 }
2816 }
2817 }
2818 return new_edges;
2819 }
2820
2821 // Find fields initializing values for allocations.
2822 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2823 assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2824 Node* alloc = pta->ideal_node();
2825 // Do nothing for Call nodes since their field values are unknown.
2826 if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2827 return 0;
2828 }
2829 InitializeNode* ini = alloc->as_Allocate()->initialization();
2830 bool visited_bottom_offset = false;
2831 GrowableArray<int> offsets_worklist;
2832 int new_edges = 0;
2833
2834 // Check if an oop field's initializing value is recorded and add
2835 // a corresponding null for the field's value if it is not recorded.
2836 // Connection Graph does not record a default initialization by null
2837 // captured by Initialize node.
2838 //
2839 for (EdgeIterator i(pta); i.has_next(); i.next()) {
2840 PointsToNode* field = i.get(); // Field (AddP)
2841 if (!field->is_Field() || !field->as_Field()->is_oop()) {
2842 continue; // Not oop field
2843 }
2844 int offset = field->as_Field()->offset();
2845 if (offset == Type::OffsetBot) {
2846 if (!visited_bottom_offset) {
2847 // OffsetBot is used to reference an array's element;
2848 // always add a reference to null to all Field nodes since we don't
2849 // know which element is referenced.
2850 if (add_edge(field, null_obj)) {
2851 // New edge was added
2852 new_edges++;
2853 add_field_uses_to_worklist(field->as_Field());
2854 visited_bottom_offset = true;
2855 }
2856 }
2857 } else {
2858 // Check only oop fields.
2859 const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2860 if (adr_type->isa_rawptr()) {
2861 #ifdef ASSERT
2862 // Raw pointers are used for initializing stores, so skip this field
2863 // since the store should already be recorded.
2864 Node* base = get_addp_base(field->ideal_node());
2865 assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2866 #endif
2867 continue;
2868 }
2869 if (!offsets_worklist.contains(offset)) {
2870 offsets_worklist.append(offset);
2871 Node* value = nullptr;
2872 if (ini != nullptr) {
2873 // StoreP::memory_type() == T_ADDRESS
2874 BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2875 Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2876 // Make sure initializing store has the same type as this AddP.
2877 // This AddP may reference a non-existing field because it is on a
2878 // dead branch of a bimorphic call which has not been eliminated yet.
2879 if (store != nullptr && store->is_Store() &&
2880 store->as_Store()->memory_type() == ft) {
2881 value = store->in(MemNode::ValueIn);
2882 #ifdef ASSERT
2883 if (VerifyConnectionGraph) {
2884 // Verify that AddP already points to all objects the value points to.
2885 PointsToNode* val = ptnode_adr(value->_idx);
2886 assert((val != nullptr), "should be processed already");
2887 PointsToNode* missed_obj = nullptr;
2888 if (val->is_JavaObject()) {
2889 if (!field->points_to(val->as_JavaObject())) {
2890 missed_obj = val;
2891 }
2892 } else {
2893 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2894 tty->print_cr("----------init store has invalid value -----");
2895 store->dump();
2896 val->dump();
2897 assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2898 }
2899 for (EdgeIterator j(val); j.has_next(); j.next()) {
2900 PointsToNode* obj = j.get();
2901 if (obj->is_JavaObject()) {
2902 if (!field->points_to(obj->as_JavaObject())) {
2903 missed_obj = obj;
2904 break;
2905 }
2906 }
2907 }
2908 }
2909 if (missed_obj != nullptr) {
2910 tty->print_cr("----------field---------------------------------");
2911 field->dump();
2912 tty->print_cr("----------missed reference to object------------");
2913 missed_obj->dump();
2914 tty->print_cr("----------object referenced by init store-------");
2915 store->dump();
2916 val->dump();
2917 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2918 }
2919 }
2920 #endif
2921 } else {
2922 // There could be initializing stores which follow allocation.
2923 // For example, a volatile field store is not collected
2924 // by Initialize node.
2925 //
2926 // Need to check for dependent loads to separate such stores from
2927 // stores which follow loads. For now, add initial value null so
2928 // that the compare-pointers optimization works correctly.
2929 }
2930 }
2931 if (value == nullptr) {
2932 // A field's initializing value was not recorded. Add null.
2933 if (add_edge(field, null_obj)) {
2934 // New edge was added
2935 new_edges++;
2936 add_field_uses_to_worklist(field->as_Field());
2937 }
2938 }
2939 }
2940 }
2941 }
2942 return new_edges;
2943 }
2944
2945 // Adjust scalar_replaceable state after Connection Graph is built.
2946 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2947 // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2948 // returns true. If one of the constraints in this method sets 'jobj' to NSR
2949 // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2950 // input, 'adjust_scalar_replaceable_state' will eventually be called with
2951 // that other object and the Phi will become a reducible Phi.
2952 // There could be multiple merges involving the same jobj.
2953 Unique_Node_List candidates;
2954
2955 // Search for non-escaping objects which are not scalar replaceable
2956 // and mark them to propagate the state to referenced objects.
2957
2958 for (UseIterator i(jobj); i.has_next(); i.next()) {
2959 PointsToNode* use = i.get();
2960 if (use->is_Arraycopy()) {
2961 continue;
2962 }
2963 if (use->is_Field()) {
2964 FieldNode* field = use->as_Field();
2965 assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2966 // 1. An object is not scalar replaceable if the field into which it is
2967 // stored has unknown offset (stored into unknown element of an array).
2968 if (field->offset() == Type::OffsetBot) {
2969 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2970 return;
2971 }
2972 for (BaseIterator i(field); i.has_next(); i.next()) {
2973 PointsToNode* base = i.get();
2974 // 2. An object is not scalar replaceable if the field into which it is
2975 // stored has multiple bases one of which is null.
2976 if ((base == null_obj) && (field->base_count() > 1)) {
2977 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2978 return;
2979 }
2980 // 2.5. An object is not scalar replaceable if the field into which it is
2981 // stored has NSR base.
2982 if (!base->scalar_replaceable()) {
2983 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2984 return;
2985 }
2986 }
2987 }
2988 assert(use->is_Field() || use->is_LocalVar(), "sanity");
2989 // 3. An object is not scalar replaceable if it is merged with other objects
2990 // and we can't remove the merge.
2991 for (EdgeIterator j(use); j.has_next(); j.next()) {
2992 PointsToNode* ptn = j.get();
2993 if (ptn->is_JavaObject() && ptn != jobj) {
2994 Node* use_n = use->ideal_node();
2995
2996 // These other local vars may point to multiple objects through a Phi.
2997 // In this case we skip them and see if we can reduce the Phi.
2998 if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
2999 use_n = use_n->in(1);
3000 }
3001
3002 // If it's already a candidate or confirmed reducible merge we can skip verification.
3003 if (candidates.member(use_n) || reducible_merges.member(use_n)) {
3004 continue;
3005 }
3006
3007 if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
3008 candidates.push(use_n);
3009 } else {
3010 // Mark all objects as NSR if we can't remove the merge.
3011 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
3012 set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
3013 }
3014 }
3015 }
3016 if (!jobj->scalar_replaceable()) {
3017 return;
3018 }
3019 }
3020
3021 for (EdgeIterator j(jobj); j.has_next(); j.next()) {
3022 if (j.get()->is_Arraycopy()) {
3023 continue;
3024 }
3025
3026 // Non-escaping object node should point only to field nodes.
3027 FieldNode* field = j.get()->as_Field();
3028 int offset = field->as_Field()->offset();
3029
3030 // 4. An object is not scalar replaceable if it has a field with unknown
3031 // offset (array's element is accessed in loop).
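// For illustration, a Java-level sketch:
//
//   Object[] a = new Object[2];
//   a[i] = x;    // non-constant index => the Field node has OffsetBot
//                // and 'a' cannot be scalar replaced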
3032 if (offset == Type::OffsetBot) {
3033 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
3034 return;
3035 }
3036 // 5. Currently an object is not scalar replaceable if a LoadStore node
3037 // accesses its field since the field value is unknown after it.
3038 //
3039 Node* n = field->ideal_node();
3040
3041 // Test for an unsafe access that was parsed as maybe off heap
3042 // (with a CheckCastPP to raw memory).
3043 assert(n->is_AddP(), "expect an address computation");
3044 if (n->in(AddPNode::Base)->is_top() &&
3045 n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
3046 assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
3047 assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
3048 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
3049 return;
3050 }
3051
3052 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3053 Node* u = n->fast_out(i);
3054 if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
3055 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
3056 return;
3057 }
3058 }
3059
3060 // 6. Or the address may point to more than one object. This may produce
3061 // a false positive result (set not scalar replaceable)
3062 // since the flow-insensitive escape analysis can't separate
3063 // the case when stores overwrite the field's value from the case
3064 // when stores happened on different control branches.
3065 //
3066 // Note: it will disable scalar replacement in some cases:
3067 //
3068 // Point p[] = new Point[1];
3069 // p[0] = new Point(); // Will not be scalar replaced
3070 //
3071 // but it will save us from incorrect optimizations in the following cases:
3072 //
3073 // Point p[] = new Point[1];
3074 // if ( x ) p[0] = new Point(); // Will not be scalar replaced
3075 //
3076 if (field->base_count() > 1 && candidates.size() == 0) {
3077 if (has_non_reducible_merge(field, reducible_merges)) {
3078 for (BaseIterator i(field); i.has_next(); i.next()) {
3079 PointsToNode* base = i.get();
3080 // Don't take into account LocalVar nodes which
3081 // may point to only one object which should also be
3082 // this field's base by now.
3083 if (base->is_JavaObject() && base != jobj) {
3084 // Mark all bases.
3085 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3086 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3087 }
3088 }
3089
3090 if (!jobj->scalar_replaceable()) {
3091 return;
3092 }
3093 }
3094 }
3095 }
3096
3097 // The candidate is truly a reducible merge only if none of the other
3098 // constraints ruled it as NSR. There could be multiple merges involving the
3099 // same jobj.
3100 assert(jobj->scalar_replaceable(), "sanity");
3101 for (uint i = 0; i < candidates.size(); i++ ) {
3102 Node* candidate = candidates.at(i);
3103 reducible_merges.push(candidate);
3104 }
3105 }
3106
3107 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3108 for (BaseIterator i(field); i.has_next(); i.next()) {
3109 Node* base = i.get()->ideal_node();
3110 if (base->is_Phi() && !reducible_merges.member(base)) {
3111 return true;
3112 }
3113 }
3114 return false;
3115 }
3116
3117 // Propagate NSR (Not scalar replaceable) state.
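// (Illustrative sketch using hypothetical Java classes) The rule implemented
// below: if an otherwise scalar replaceable object is stored into a field of
// an NSR object, it must become NSR as well, e.g.
//
//   Box b = new Box();       // assume 'b' was already marked NSR
//   Point p = new Point();   // scalar replaceable so far
//   b.f = p;                 // 'p' now has an NSR base and turns NSR too
//
// Marking 'p' NSR may in turn affect objects stored into 'p', so the loop
// below iterates until no new NSR allocation is found (a fixpoint).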
3118 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) { 3119 int jobj_length = jobj_worklist.length(); 3120 bool found_nsr_alloc = true; 3121 while (found_nsr_alloc) { 3122 found_nsr_alloc = false; 3123 for (int next = 0; next < jobj_length; ++next) { 3124 JavaObjectNode* jobj = jobj_worklist.at(next); 3125 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) { 3126 PointsToNode* use = i.get(); 3127 if (use->is_Field()) { 3128 FieldNode* field = use->as_Field(); 3129 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 3130 assert(field->offset() != Type::OffsetBot, "sanity"); 3131 for (BaseIterator i(field); i.has_next(); i.next()) { 3132 PointsToNode* base = i.get(); 3133 // An object is not scalar replaceable if the field into which 3134 // it is stored has NSR base. 3135 if ((base != null_obj) && !base->scalar_replaceable()) { 3136 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 3137 found_nsr_alloc = true; 3138 break; 3139 } 3140 } 3141 } 3142 } 3143 } 3144 } 3145 } 3146 3147 #ifdef ASSERT 3148 void ConnectionGraph::verify_connection_graph( 3149 GrowableArray<PointsToNode*>& ptnodes_worklist, 3150 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 3151 GrowableArray<JavaObjectNode*>& java_objects_worklist, 3152 GrowableArray<Node*>& addp_worklist) { 3153 // Verify that graph is complete - no new edges could be added. 3154 int java_objects_length = java_objects_worklist.length(); 3155 int non_escaped_length = non_escaped_allocs_worklist.length(); 3156 int new_edges = 0; 3157 for (int next = 0; next < java_objects_length; ++next) { 3158 JavaObjectNode* ptn = java_objects_worklist.at(next); 3159 new_edges += add_java_object_edges(ptn, true); 3160 } 3161 assert(new_edges == 0, "graph was not complete"); 3162 // Verify that escape state is final. 3163 int length = non_escaped_allocs_worklist.length(); 3164 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 3165 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 3166 (non_escaped_length == length) && 3167 (_worklist.length() == 0), "escape state was not final"); 3168 3169 // Verify fields information. 3170 int addp_length = addp_worklist.length(); 3171 for (int next = 0; next < addp_length; ++next ) { 3172 Node* n = addp_worklist.at(next); 3173 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 3174 if (field->is_oop()) { 3175 // Verify that field has all bases 3176 Node* base = get_addp_base(n); 3177 PointsToNode* ptn = ptnode_adr(base->_idx); 3178 if (ptn->is_JavaObject()) { 3179 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 3180 } else { 3181 assert(ptn->is_LocalVar(), "sanity"); 3182 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3183 PointsToNode* e = i.get(); 3184 if (e->is_JavaObject()) { 3185 assert(field->has_base(e->as_JavaObject()), "sanity"); 3186 } 3187 } 3188 } 3189 // Verify that all fields have initializing values. 
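// (Illustrative note) A field with no outgoing value edge would mean that a
// load from it could not be matched with any recorded store or initializing
// value, so the verification below dumps the field's bases and references
// before asserting.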
3190 if (field->edge_count() == 0) { 3191 tty->print_cr("----------field does not have references----------"); 3192 field->dump(); 3193 for (BaseIterator i(field); i.has_next(); i.next()) { 3194 PointsToNode* base = i.get(); 3195 tty->print_cr("----------field has next base---------------------"); 3196 base->dump(); 3197 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 3198 tty->print_cr("----------base has fields-------------------------"); 3199 for (EdgeIterator j(base); j.has_next(); j.next()) { 3200 j.get()->dump(); 3201 } 3202 tty->print_cr("----------base has references---------------------"); 3203 for (UseIterator j(base); j.has_next(); j.next()) { 3204 j.get()->dump(); 3205 } 3206 } 3207 } 3208 for (UseIterator i(field); i.has_next(); i.next()) { 3209 i.get()->dump(); 3210 } 3211 assert(field->edge_count() > 0, "sanity"); 3212 } 3213 } 3214 } 3215 } 3216 #endif 3217 3218 // Optimize ideal graph. 3219 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 3220 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 3221 Compile* C = _compile; 3222 PhaseIterGVN* igvn = _igvn; 3223 if (EliminateLocks) { 3224 // Mark locks before changing ideal graph. 3225 int cnt = C->macro_count(); 3226 for (int i = 0; i < cnt; i++) { 3227 Node *n = C->macro_node(i); 3228 if (n->is_AbstractLock()) { // Lock and Unlock nodes 3229 AbstractLockNode* alock = n->as_AbstractLock(); 3230 if (!alock->is_non_esc_obj()) { 3231 const Type* obj_type = igvn->type(alock->obj_node()); 3232 if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) { 3233 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 3234 // The lock could be marked eliminated by lock coarsening 3235 // code during first IGVN before EA. Replace coarsened flag 3236 // to eliminate all associated locks/unlocks. 3237 #ifdef ASSERT 3238 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 3239 #endif 3240 alock->set_non_esc_obj(); 3241 } 3242 } 3243 } 3244 } 3245 } 3246 3247 if (OptimizePtrCompare) { 3248 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 3249 Node *n = ptr_cmp_worklist.at(i); 3250 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 3251 const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2)); 3252 if (tcmp->singleton()) { 3253 Node* cmp = igvn->makecon(tcmp); 3254 #ifndef PRODUCT 3255 if (PrintOptimizePtrCompare) { 3256 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 3257 if (Verbose) { 3258 n->dump(1); 3259 } 3260 } 3261 #endif 3262 igvn->replace_node(n, cmp); 3263 } 3264 } 3265 } 3266 3267 // For MemBarStoreStore nodes added in library_call.cpp, check 3268 // escape status of associated AllocateNode and optimize out 3269 // MemBarStoreStore node if the allocated object never escapes. 
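// (Illustrative reasoning) The StoreStore barrier only has to order an
// object's field initialization before the object reference is published to
// another thread. If the allocation never escapes its thread, no other thread
// can observe the object, so the barrier can be removed (inline type buffers)
// or relaxed to a plain CPU-order membar, as done below.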
3270 for (int i = 0; i < storestore_worklist.length(); i++) {
3271 Node* storestore = storestore_worklist.at(i);
3272 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3273 if (alloc->is_Allocate() && not_global_escape(alloc)) {
3274 if (alloc->in(AllocateNode::InlineType) != nullptr) {
3275 // Non-escaping inline type buffer allocations don't require a membar
3276 storestore->as_MemBar()->remove(_igvn);
3277 } else {
3278 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3279 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
3280 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3281 igvn->register_new_node_with_optimizer(mb);
3282 igvn->replace_node(storestore, mb);
3283 }
3284 }
3285 }
3286 }
3287
3288 // Optimize object compares.
3289 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3290 assert(OptimizePtrCompare, "sanity");
3291 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3292 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3293 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1]
3294
3295 PointsToNode* ptn1 = ptnode_adr(left->_idx);
3296 PointsToNode* ptn2 = ptnode_adr(right->_idx);
3297 JavaObjectNode* jobj1 = unique_java_object(left);
3298 JavaObjectNode* jobj2 = unique_java_object(right);
3299
3300 // The use of this method during allocation merge reduction may cause 'left'
3301 // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3302 // or that doesn't reference a unique java object.
3303 if (ptn1 == nullptr || ptn2 == nullptr ||
3304 jobj1 == nullptr || jobj2 == nullptr) {
3305 return UNKNOWN;
3306 }
3307
3308 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3309 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3310
3311 // Check simple cases first.
3312 if (jobj1 != nullptr) {
3313 if (jobj1->escape_state() == PointsToNode::NoEscape) {
3314 if (jobj1 == jobj2) {
3315 // Comparing the same non-escaping object.
3316 return EQ;
3317 }
3318 Node* obj = jobj1->ideal_node();
3319 // Comparing a non-escaping allocation.
3320 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3321 !ptn2->points_to(jobj1)) {
3322 return NE; // This includes nullness check.
3323 }
3324 }
3325 }
3326 if (jobj2 != nullptr) {
3327 if (jobj2->escape_state() == PointsToNode::NoEscape) {
3328 Node* obj = jobj2->ideal_node();
3329 // Comparing a non-escaping allocation.
3330 if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3331 !ptn1->points_to(jobj2)) {
3332 return NE; // This includes nullness check.
3333 }
3334 }
3335 }
3336 if (jobj1 != nullptr && jobj1 != phantom_obj &&
3337 jobj2 != nullptr && jobj2 != phantom_obj &&
3338 jobj1->ideal_node()->is_Con() &&
3339 jobj2->ideal_node()->is_Con()) {
3340 // Comparing Klass or String constants. Need to be careful with
3341 // compressed pointers - compare types of ConN and ConP instead of nodes.
3342 const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3343 const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3344 if (t1->make_ptr() == t2->make_ptr()) {
3345 return EQ;
3346 } else {
3347 return NE;
3348 }
3349 }
3350 if (ptn1->meet(ptn2)) {
3351 return UNKNOWN; // Sets are not disjoint
3352 }
3353
3354 // Sets are disjoint.
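// (Illustrative example) Disjoint sets alone do not justify NE: if set1 is
// {phantom_obj} and set2 is {null_obj}, the unknown pointer on the left may
// itself be null at run time, so the compare could still be EQ. The flags
// computed below encode exactly this nullness check.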
3355 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 3356 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 3357 bool set1_has_null_ptr = ptn1->points_to(null_obj); 3358 bool set2_has_null_ptr = ptn2->points_to(null_obj); 3359 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 3360 (set2_has_unknown_ptr && set1_has_null_ptr)) { 3361 // Check nullness of unknown object. 3362 return UNKNOWN; 3363 } 3364 3365 // Disjointness by itself is not sufficient since 3366 // alias analysis is not complete for escaped objects. 3367 // Disjoint sets are definitely unrelated only when 3368 // at least one set has only not escaping allocations. 3369 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 3370 if (ptn1->non_escaping_allocation()) { 3371 return NE; 3372 } 3373 } 3374 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 3375 if (ptn2->non_escaping_allocation()) { 3376 return NE; 3377 } 3378 } 3379 return UNKNOWN; 3380 } 3381 3382 // Connection Graph construction functions. 3383 3384 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 3385 PointsToNode* ptadr = _nodes.at(n->_idx); 3386 if (ptadr != nullptr) { 3387 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 3388 return; 3389 } 3390 Compile* C = _compile; 3391 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 3392 map_ideal_node(n, ptadr); 3393 } 3394 3395 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 3396 PointsToNode* ptadr = _nodes.at(n->_idx); 3397 if (ptadr != nullptr) { 3398 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 3399 return ptadr; 3400 } 3401 Compile* C = _compile; 3402 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 3403 map_ideal_node(n, ptadr); 3404 return ptadr; 3405 } 3406 3407 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 3408 PointsToNode* ptadr = _nodes.at(n->_idx); 3409 if (ptadr != nullptr) { 3410 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 3411 return; 3412 } 3413 bool unsafe = false; 3414 bool is_oop = is_oop_field(n, offset, &unsafe); 3415 if (unsafe) { 3416 es = PointsToNode::GlobalEscape; 3417 } 3418 Compile* C = _compile; 3419 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 3420 map_ideal_node(n, field); 3421 } 3422 3423 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 3424 PointsToNode* src, PointsToNode* dst) { 3425 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 3426 assert((src != null_obj) && (dst != null_obj), "not for ConP null"); 3427 PointsToNode* ptadr = _nodes.at(n->_idx); 3428 if (ptadr != nullptr) { 3429 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 3430 return; 3431 } 3432 Compile* C = _compile; 3433 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 3434 map_ideal_node(n, ptadr); 3435 // Add edge from arraycopy node to source object. 3436 (void)add_edge(ptadr, src); 3437 src->set_arraycopy_src(); 3438 // Add edge from destination object to arraycopy node. 3439 (void)add_edge(dst, ptadr); 3440 dst->set_arraycopy_dst(); 3441 } 3442 3443 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 3444 const Type* adr_type = n->as_AddP()->bottom_type(); 3445 int field_offset = adr_type->isa_aryptr() ? 
adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot; 3446 BasicType bt = T_INT; 3447 if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) { 3448 // Check only oop fields. 3449 if (!adr_type->isa_aryptr() || 3450 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 3451 adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { 3452 // OffsetBot is used to reference array's element. Ignore first AddP. 3453 if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { 3454 bt = T_OBJECT; 3455 } 3456 } 3457 } else if (offset != oopDesc::klass_offset_in_bytes()) { 3458 if (adr_type->isa_instptr()) { 3459 ciField* field = _compile->alias_type(adr_type->is_ptr())->field(); 3460 if (field != nullptr) { 3461 bt = field->layout_type(); 3462 } else { 3463 // Check for unsafe oop field access 3464 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3465 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3466 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3467 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3468 bt = T_OBJECT; 3469 (*unsafe) = true; 3470 } 3471 } 3472 } else if (adr_type->isa_aryptr()) { 3473 if (offset == arrayOopDesc::length_offset_in_bytes()) { 3474 // Ignore array length load. 3475 } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { 3476 // Ignore first AddP. 3477 } else { 3478 const Type* elemtype = adr_type->is_aryptr()->elem(); 3479 if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) { 3480 ciInlineKlass* vk = elemtype->inline_klass(); 3481 field_offset += vk->first_field_offset(); 3482 bt = vk->get_field_by_offset(field_offset, false)->layout_type(); 3483 } else { 3484 bt = elemtype->array_element_basic_type(); 3485 } 3486 } 3487 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 3488 // Allocation initialization, ThreadLocal field access, unsafe access 3489 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 3490 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 3491 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 3492 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 3493 bt = T_OBJECT; 3494 } 3495 } 3496 } 3497 // Note: T_NARROWOOP is not classed as a real reference type 3498 return (is_reference_type(bt) || bt == T_NARROWOOP); 3499 } 3500 3501 // Returns unique pointed java object or null. 3502 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const { 3503 // If the node was created after the escape computation we can't answer. 3504 uint idx = n->_idx; 3505 if (idx >= nodes_size()) { 3506 return nullptr; 3507 } 3508 PointsToNode* ptn = ptnode_adr(idx); 3509 if (ptn == nullptr) { 3510 return nullptr; 3511 } 3512 if (ptn->is_JavaObject()) { 3513 return ptn->as_JavaObject(); 3514 } 3515 assert(ptn->is_LocalVar(), "sanity"); 3516 // Check all java objects it points to. 3517 JavaObjectNode* jobj = nullptr; 3518 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 3519 PointsToNode* e = i.get(); 3520 if (e->is_JavaObject()) { 3521 if (jobj == nullptr) { 3522 jobj = e->as_JavaObject(); 3523 } else if (jobj != e) { 3524 return nullptr; 3525 } 3526 } 3527 } 3528 return jobj; 3529 } 3530 3531 // Return true if this node points only to non-escaping allocations. 
3532 bool PointsToNode::non_escaping_allocation() {
3533 if (is_JavaObject()) {
3534 Node* n = ideal_node();
3535 if (n->is_Allocate() || n->is_CallStaticJava()) {
3536 return (escape_state() == PointsToNode::NoEscape);
3537 } else {
3538 return false;
3539 }
3540 }
3541 assert(is_LocalVar(), "sanity");
3542 // Check all java objects it points to.
3543 for (EdgeIterator i(this); i.has_next(); i.next()) {
3544 PointsToNode* e = i.get();
3545 if (e->is_JavaObject()) {
3546 Node* n = e->ideal_node();
3547 if ((e->escape_state() != PointsToNode::NoEscape) ||
3548 !(n->is_Allocate() || n->is_CallStaticJava())) {
3549 return false;
3550 }
3551 }
3552 }
3553 return true;
3554 }
3555
3556 // Return true if we know the node does not escape globally.
3557 bool ConnectionGraph::not_global_escape(Node *n) {
3558 assert(!_collecting, "should not call during graph construction");
3559 // If the node was created after the escape computation we can't answer.
3560 uint idx = n->_idx;
3561 if (idx >= nodes_size()) {
3562 return false;
3563 }
3564 PointsToNode* ptn = ptnode_adr(idx);
3565 if (ptn == nullptr) {
3566 return false; // not in congraph (e.g. ConI)
3567 }
3568 PointsToNode::EscapeState es = ptn->escape_state();
3569 // If we have already computed a value, return it.
3570 if (es >= PointsToNode::GlobalEscape) {
3571 return false;
3572 }
3573 if (ptn->is_JavaObject()) {
3574 return true; // (es < PointsToNode::GlobalEscape);
3575 }
3576 assert(ptn->is_LocalVar(), "sanity");
3577 // Check all java objects it points to.
3578 for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3579 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3580 return false;
3581 }
3582 }
3583 return true;
3584 }
3585
3586 // Return true if the locked object does not escape globally
3587 // and the locked code region (identified by BoxLockNode) is balanced:
3588 // all compiled code paths have corresponding Lock/Unlock pairs.
3589 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3590 if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3591 if (EliminateNestedLocks) {
3592 // We can mark the whole locking region as Local only when only
3593 // one object is used for locking.
3594 alock->box_node()->as_BoxLock()->set_local();
3595 }
3596 return true;
3597 }
3598 return false;
3599 }
3600
3601 // Helper functions
3602
3603 // Return true if this node is the specified java object or points to it.
3604 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3605 if (is_JavaObject()) {
3606 return (this == ptn);
3607 }
3608 assert(is_LocalVar() || is_Field(), "sanity");
3609 for (EdgeIterator i(this); i.has_next(); i.next()) {
3610 if (i.get() == ptn) {
3611 return true;
3612 }
3613 }
3614 return false;
3615 }
3616
3617 // Return true if one node points to the other or both point to a common object.
3618 bool PointsToNode::meet(PointsToNode* ptn) {
3619 if (this == ptn) {
3620 return true;
3621 } else if (ptn->is_JavaObject()) {
3622 return this->points_to(ptn->as_JavaObject());
3623 } else if (this->is_JavaObject()) {
3624 return ptn->points_to(this->as_JavaObject());
3625 }
3626 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3627 int ptn_count = ptn->edge_count();
3628 for (EdgeIterator i(this); i.has_next(); i.next()) {
3629 PointsToNode* this_e = i.get();
3630 for (int j = 0; j < ptn_count; j++) {
3631 if (this_e == ptn->edge(j)) {
3632 return true;
3633 }
3634 }
3635 }
3636 return false;
3637 }
3638
3639 #ifdef ASSERT
3640 // Return true if this java object is one of the field's bases.
3641 bool FieldNode::has_base(JavaObjectNode* jobj) const { 3642 for (BaseIterator i(this); i.has_next(); i.next()) { 3643 if (i.get() == jobj) { 3644 return true; 3645 } 3646 } 3647 return false; 3648 } 3649 #endif 3650 3651 bool ConnectionGraph::is_captured_store_address(Node* addp) { 3652 // Handle simple case first. 3653 assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); 3654 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 3655 return true; 3656 } else if (addp->in(AddPNode::Address)->is_Phi()) { 3657 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 3658 Node* addp_use = addp->fast_out(i); 3659 if (addp_use->is_Store()) { 3660 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 3661 if (addp_use->fast_out(j)->is_Initialize()) { 3662 return true; 3663 } 3664 } 3665 } 3666 } 3667 } 3668 return false; 3669 } 3670 3671 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) { 3672 const Type *adr_type = phase->type(adr); 3673 if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { 3674 // We are computing a raw address for a store captured by an Initialize 3675 // compute an appropriate address type. AddP cases #3 and #5 (see below). 3676 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 3677 assert(offs != Type::OffsetBot || 3678 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 3679 "offset must be a constant or it is initialization of array"); 3680 return offs; 3681 } 3682 return adr_type->is_ptr()->flat_offset(); 3683 } 3684 3685 Node* ConnectionGraph::get_addp_base(Node *addp) { 3686 assert(addp->is_AddP(), "must be AddP"); 3687 // 3688 // AddP cases for Base and Address inputs: 3689 // case #1. Direct object's field reference: 3690 // Allocate 3691 // | 3692 // Proj #5 ( oop result ) 3693 // | 3694 // CheckCastPP (cast to instance type) 3695 // | | 3696 // AddP ( base == address ) 3697 // 3698 // case #2. Indirect object's field reference: 3699 // Phi 3700 // | 3701 // CastPP (cast to instance type) 3702 // | | 3703 // AddP ( base == address ) 3704 // 3705 // case #3. Raw object's field reference for Initialize node: 3706 // Allocate 3707 // | 3708 // Proj #5 ( oop result ) 3709 // top | 3710 // \ | 3711 // AddP ( base == top ) 3712 // 3713 // case #4. Array's element reference: 3714 // {CheckCastPP | CastPP} 3715 // | | | 3716 // | AddP ( array's element offset ) 3717 // | | 3718 // AddP ( array's offset ) 3719 // 3720 // case #5. Raw object's field reference for arraycopy stub call: 3721 // The inline_native_clone() case when the arraycopy stub is called 3722 // after the allocation before Initialize and CheckCastPP nodes. 3723 // Allocate 3724 // | 3725 // Proj #5 ( oop result ) 3726 // | | 3727 // AddP ( base == address ) 3728 // 3729 // case #6. Constant Pool, ThreadLocal, CastX2P or 3730 // Raw object's field reference: 3731 // {ConP, ThreadLocal, CastX2P, raw Load} 3732 // top | 3733 // \ | 3734 // AddP ( base == top ) 3735 // 3736 // case #7. Klass's field reference. 3737 // LoadKlass 3738 // | | 3739 // AddP ( base == address ) 3740 // 3741 // case #8. narrow Klass's field reference. 3742 // LoadNKlass 3743 // | 3744 // DecodeN 3745 // | | 3746 // AddP ( base == address ) 3747 // 3748 // case #9. 
Mixed unsafe access
3749 // {instance}
3750 // |
3751 // CheckCastPP (raw)
3752 // top |
3753 // \ |
3754 // AddP ( base == top )
3755 //
3756 Node *base = addp->in(AddPNode::Base);
3757 if (base->uncast()->is_top()) { // AddP cases #3, #6 and #9.
3758 base = addp->in(AddPNode::Address);
3759 while (base->is_AddP()) {
3760 // Case #6 (unsafe access) may have several chained AddP nodes.
3761 assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3762 base = base->in(AddPNode::Address);
3763 }
3764 if (base->Opcode() == Op_CheckCastPP &&
3765 base->bottom_type()->isa_rawptr() &&
3766 _igvn->type(base->in(1))->isa_oopptr()) {
3767 base = base->in(1); // Case #9
3768 } else {
3769 Node* uncast_base = base->uncast();
3770 int opcode = uncast_base->Opcode();
3771 assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3772 opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3773 (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3774 is_captured_store_address(addp), "sanity");
3775 }
3776 }
3777 return base;
3778 }
3779
3780 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3781 assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3782 Node* addp2 = addp->raw_out(0);
3783 if (addp->outcnt() == 1 && addp2->is_AddP() &&
3784 addp2->in(AddPNode::Base) == n &&
3785 addp2->in(AddPNode::Address) == addp) {
3786 assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3787 //
3788 // Find the array's offset to push it on the worklist first and
3789 // as a result process the array's element offset first (pushed second)
3790 // to avoid a CastPP for the array's offset.
3791 // Otherwise the inserted CastPP (LocalVar) will point to what
3792 // the AddP (Field) points to, which would be wrong since
3793 // the algorithm expects the CastPP to have the same points-to set
3794 // as AddP's base CheckCastPP (LocalVar).
3795 //
3796 // ArrayAllocation
3797 // |
3798 // CheckCastPP
3799 // |
3800 // memProj (from ArrayAllocation CheckCastPP)
3801 // | ||
3802 // | || Int (element index)
3803 // | || | ConI (log(element size))
3804 // | || | /
3805 // | || LShift
3806 // | || /
3807 // | AddP (array's element offset)
3808 // | |
3809 // | | ConI (array's offset: #12(32-bits) or #24(64-bits))
3810 // | / /
3811 // AddP (array's offset)
3812 // |
3813 // Load/Store (memory operation on array's element)
3814 //
3815 return addp2;
3816 }
3817 return nullptr;
3818 }
3819
3820 //
3821 // Adjust the type and inputs of an AddP which computes the
3822 // address of a field of an instance.
3823 //
3824 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3825 PhaseGVN* igvn = _igvn;
3826 const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3827 assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3828 const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3829 if (t == nullptr) {
3830 // We are computing a raw address for a store captured by an Initialize;
3831 // compute an appropriate address type (cases #3 and #5).
3832 assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3833 assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3834 intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3835 assert(offs != Type::OffsetBot, "offset must be a constant");
3836 if (base_t->isa_aryptr() != nullptr) {
3837 // In the case of a flat inline type array, each field has its
3838 // own slice so we need to extract the field being accessed from
3839 // the address computation
3840 t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3841 } else {
3842 t = base_t->add_offset(offs)->is_oopptr();
3843 }
3844 }
3845 int inst_id = base_t->instance_id();
3846 assert(!t->is_known_instance() || t->instance_id() == inst_id,
3847 "old type must be non-instance or match new type");
3848
3849 // The type 't' could be a subclass of 'base_t'.
3850 // As a result t->offset() could be larger than base_t's size and it will
3851 // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3852 // constructor verifies correctness of the offset.
3853 //
3854 // It could happen on a subclass's branch (from type profiling
3855 // inlining) which was not eliminated during parsing since the exactness
3856 // of the allocation type was not propagated to the subclass type check.
3857 //
3858 // Or the type 't' might not be related to 'base_t' at all.
3859 // It could happen when the CHA type is different from the MDO type on a dead path
3860 // (for example, from an instanceof check) which is not collapsed during parsing.
3861 //
3862 // Do nothing for such an AddP node and don't process its users since
3863 // this code branch will go away.
3864 //
3865 if (!t->is_known_instance() &&
3866 !base_t->maybe_java_subtype_of(t)) {
3867 return false; // bail out
3868 }
3869 const TypePtr* tinst = base_t->add_offset(t->offset());
3870 if (tinst->isa_aryptr() && t->isa_aryptr()) {
3871 // In the case of a flat inline type array, each field has its
3872 // own slice so we need to keep track of the field being accessed.
3873 tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3874 // Keep array properties (not flat/null-free)
3875 tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3876 if (tinst == nullptr) {
3877 return false; // Skip dead path with inconsistent properties
3878 }
3879 }
3880
3881 // Do NOT remove the next line: ensure a new alias index is allocated
3882 // for the instance type. Note: C++ will not remove it since the call
3883 // has a side effect.
3884 int alias_idx = _compile->get_alias_index(tinst);
3885 igvn->set_type(addp, tinst);
3886 // record the allocation in the node map
3887 set_map(addp, get_map(base->_idx));
3888 // Set addp's Base and Address to 'base'.
3889 Node *abase = addp->in(AddPNode::Base);
3890 Node *adr = addp->in(AddPNode::Address);
3891 if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3892 adr->in(0)->_idx == (uint)inst_id) {
3893 // Skip AddP cases #3 and #5.
3894 } else {
3895 assert(!abase->is_top(), "sanity"); // AddP case #3
3896 if (abase != base) {
3897 igvn->hash_delete(addp);
3898 addp->set_req(AddPNode::Base, base);
3899 if (abase == adr) {
3900 addp->set_req(AddPNode::Address, base);
3901 } else {
3902 // AddP case #4 (adr is array's element offset AddP node)
3903 #ifdef ASSERT
3904 const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3905 assert(adr->is_AddP() && atype != nullptr &&
3906 atype->instance_id() == inst_id, "array's element offset should be processed first");
3907 #endif
3908 }
3909 igvn->hash_insert(addp);
3910 }
3911 }
3912 // Put on IGVN worklist since at least addp's type was changed above.
3913 record_for_optimizer(addp);
3914 return true;
3915 }
3916
3917 //
3918 // Create a new version of orig_phi if necessary. Returns either the newly
3919 // created phi or an existing phi. Sets new_created to indicate whether a new
3920 // phi was created. Cache the last newly created phi in the node map.
3921 //
3922 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
3923 Compile *C = _compile;
3924 PhaseGVN* igvn = _igvn;
3925 new_created = false;
3926 int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3927 // nothing to do if orig_phi is bottom memory or matches alias_idx
3928 if (phi_alias_idx == alias_idx) {
3929 return orig_phi;
3930 }
3931 // Have we recently created a Phi for this alias index?
3932 PhiNode *result = get_map_phi(orig_phi->_idx);
3933 if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3934 return result;
3935 }
3936 // Previous check may fail when the same wide memory Phi was split into Phis
3937 // for different memory slices. Search all Phis for this region.
3938 if (result != nullptr) {
3939 Node* region = orig_phi->in(0);
3940 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3941 Node* phi = region->fast_out(i);
3942 if (phi->is_Phi() &&
3943 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3944 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3945 return phi->as_Phi();
3946 }
3947 }
3948 }
3949 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3950 if (C->do_escape_analysis() == true && !C->failing()) {
3951 // Retry compilation without escape analysis.
3952 // If this is the first failure, the sentinel string will "stick"
3953 // to the Compile object, and the C2Compiler will see it and retry.
3954 C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3955 }
3956 return nullptr;
3957 }
3958 orig_phi_worklist.append_if_missing(orig_phi);
3959 const TypePtr *atype = C->get_adr_type(alias_idx);
3960 result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3961 C->copy_node_notes_to(result, orig_phi);
3962 igvn->set_type(result, result->bottom_type());
3963 record_for_optimizer(result);
3964 set_map(orig_phi, result);
3965 new_created = true;
3966 return result;
3967 }
3968
3969 //
3970 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3971 // specified alias index.
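// (Illustrative note) Recursion through nested memory Phis is replaced by an
// explicit work stack below: 'phi_list' remembers the partially processed Phis
// and 'cur_input' the input index to resume at, so arbitrarily deep Phi chains
// are split without growing the C++ call stack.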
//
3973 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
3974 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3975 Compile *C = _compile;
3976 PhaseGVN* igvn = _igvn;
3977 bool new_phi_created;
3978 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3979 if (!new_phi_created) {
3980 return result;
3981 }
3982 GrowableArray<PhiNode *> phi_list;
3983 GrowableArray<uint> cur_input;
3984 PhiNode *phi = orig_phi;
3985 uint idx = 1;
3986 bool finished = false;
3987 while(!finished) {
3988 while (idx < phi->req()) {
3989 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
3990 if (mem != nullptr && mem->is_Phi()) {
3991 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3992 if (new_phi_created) {
3993 // found a phi for which we created a new split; push the current one on the worklist
3994 // and begin processing the new one
3995 phi_list.push(phi);
3996 cur_input.push(idx);
3997 phi = mem->as_Phi();
3998 result = newphi;
3999 idx = 1;
4000 continue;
4001 } else {
4002 mem = newphi;
4003 }
4004 }
4005 if (C->failing()) {
4006 return nullptr;
4007 }
4008 result->set_req(idx++, mem);
4009 }
4010 #ifdef ASSERT
4011 // verify that the new Phi has an input for each input of the original
4012 assert( phi->req() == result->req(), "must have same number of inputs.");
4013 assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
4014 #endif
4015 // Check that the new phi's inputs correspond to the original phi's inputs
4016 // (null inputs must match).
4017 for (uint i = 1; i < phi->req(); i++) {
4018 Node* in = result->in(i);
4019 assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
4020 }
4021 // we have finished processing a Phi, see if there are any more to do
4022 finished = (phi_list.length() == 0 );
4023 if (!finished) {
4024 phi = phi_list.pop();
4025 idx = cur_input.pop();
4026 PhiNode *prev_result = get_map_phi(phi->_idx);
4027 prev_result->set_req(idx++, result);
4028 result = prev_result;
4029 }
4030 }
4031 return result;
4032 }
4033
4034 //
4035 // The next methods are derived from methods in MemNode.
4036 //
4037 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
4038 Node *mem = mmem;
4039 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
4040 // means an array I have not precisely typed yet. Do not do any
4041 // alias stuff with it any time soon.
4042 if (toop->base() != Type::AnyPtr &&
4043 !(toop->isa_instptr() &&
4044 toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
4045 toop->offset() == Type::OffsetBot)) {
4046 mem = mmem->memory_at(alias_idx);
4047 // Update input if it is progress over what we have now
4048 }
4049 return mem;
4050 }
4051
4052 //
4053 // Move memory users to their memory slices.
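// (Illustrative note) After a node has been retyped to a per-instance slice,
// MergeMem and MemBar users may still reference it on the general slice; the
// walk below uses find_inst_mem() to compute the proper memory state for the
// general slice and redirects those users to it.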
4054 // 4055 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 4056 Compile* C = _compile; 4057 PhaseGVN* igvn = _igvn; 4058 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 4059 assert(tp != nullptr, "ptr type"); 4060 int alias_idx = C->get_alias_index(tp); 4061 int general_idx = C->get_general_index(alias_idx); 4062 4063 // Move users first 4064 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4065 Node* use = n->fast_out(i); 4066 if (use->is_MergeMem()) { 4067 MergeMemNode* mmem = use->as_MergeMem(); 4068 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 4069 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 4070 continue; // Nothing to do 4071 } 4072 // Replace previous general reference to mem node. 4073 uint orig_uniq = C->unique(); 4074 Node* m = find_inst_mem(n, general_idx, orig_phis); 4075 assert(orig_uniq == C->unique(), "no new nodes"); 4076 mmem->set_memory_at(general_idx, m); 4077 --imax; 4078 --i; 4079 } else if (use->is_MemBar()) { 4080 assert(!use->is_Initialize(), "initializing stores should not be moved"); 4081 if (use->req() > MemBarNode::Precedent && 4082 use->in(MemBarNode::Precedent) == n) { 4083 // Don't move related membars. 4084 record_for_optimizer(use); 4085 continue; 4086 } 4087 tp = use->as_MemBar()->adr_type()->isa_ptr(); 4088 if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || 4089 alias_idx == general_idx) { 4090 continue; // Nothing to do 4091 } 4092 // Move to general memory slice. 4093 uint orig_uniq = C->unique(); 4094 Node* m = find_inst_mem(n, general_idx, orig_phis); 4095 assert(orig_uniq == C->unique(), "no new nodes"); 4096 igvn->hash_delete(use); 4097 imax -= use->replace_edge(n, m, igvn); 4098 igvn->hash_insert(use); 4099 record_for_optimizer(use); 4100 --i; 4101 #ifdef ASSERT 4102 } else if (use->is_Mem()) { 4103 // Memory nodes should have new memory input. 4104 tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); 4105 assert(tp != nullptr, "ptr type"); 4106 int idx = C->get_alias_index(tp); 4107 assert(get_map(use->_idx) != nullptr || idx == alias_idx, 4108 "Following memory nodes should have new memory input or be on the same memory slice"); 4109 } else if (use->is_Phi()) { 4110 // Phi nodes should be split and moved already. 4111 tp = use->as_Phi()->adr_type()->isa_ptr(); 4112 assert(tp != nullptr, "ptr type"); 4113 int idx = C->get_alias_index(tp); 4114 assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); 4115 } else { 4116 use->dump(); 4117 assert(false, "should not be here"); 4118 #endif 4119 } 4120 } 4121 } 4122 4123 // 4124 // Search memory chain of "mem" to find a MemNode whose address 4125 // is the specified alias index. 4126 // 4127 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000 4128 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis, uint rec_depth) { 4129 if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) { 4130 _compile->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4131 return nullptr;
4132 }
4133 if (orig_mem == nullptr) {
4134 return orig_mem;
4135 }
4136 Compile* C = _compile;
4137 PhaseGVN* igvn = _igvn;
4138 const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4139 bool is_instance = (toop != nullptr) && toop->is_known_instance();
4140 Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4141 Node *prev = nullptr;
4142 Node *result = orig_mem;
4143 while (prev != result) {
4144 prev = result;
4145 if (result == start_mem) {
4146 break; // hit one of our sentinels
4147 }
4148 if (result->is_Mem()) {
4149 const Type *at = igvn->type(result->in(MemNode::Address));
4150 if (at == Type::TOP) {
4151 break; // Dead
4152 }
4153 assert (at->isa_ptr() != nullptr, "pointer type required.");
4154 int idx = C->get_alias_index(at->is_ptr());
4155 if (idx == alias_idx) {
4156 break; // Found
4157 }
4158 if (!is_instance && (at->isa_oopptr() == nullptr ||
4159 !at->is_oopptr()->is_known_instance())) {
4160 break; // Do not skip store to general memory slice.
4161 }
4162 result = result->in(MemNode::Memory);
4163 }
4164 if (!is_instance) {
4165 continue; // don't search further for non-instance types
4166 }
4167 // skip over a call which does not affect this memory slice
4168 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4169 Node *proj_in = result->in(0);
4170 if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4171 break; // hit one of our sentinels
4172 } else if (proj_in->is_Call()) {
4173 // ArrayCopy node processed here as well
4174 CallNode *call = proj_in->as_Call();
4175 if (!call->may_modify(toop, igvn)) {
4176 result = call->in(TypeFunc::Memory);
4177 }
4178 } else if (proj_in->is_Initialize()) {
4179 AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4180 // Stop if this is the initialization for the object instance
4181 // which contains this memory slice, otherwise skip over it.
4182 if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4183 result = proj_in->in(TypeFunc::Memory);
4184 }
4185 } else if (proj_in->is_MemBar()) {
4186 // Check if there is an array copy for a clone
4187 // Step over GC barrier when ReduceInitialCardMarks is disabled
4188 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4189 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4190
4191 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4192 // Stop if it is a clone
4193 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4194 if (ac->may_modify(toop, igvn)) {
4195 break;
4196 }
4197 }
4198 result = proj_in->in(TypeFunc::Memory);
4199 }
4200 } else if (result->is_MergeMem()) {
4201 MergeMemNode *mmem = result->as_MergeMem();
4202 result = step_through_mergemem(mmem, alias_idx, toop);
4203 if (result == mmem->base_memory()) {
4204 // Didn't find instance memory, search through general slice recursively.
4205 result = mmem->memory_at(C->get_general_index(alias_idx)); 4206 result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1); 4207 if (C->failing()) { 4208 return nullptr; 4209 } 4210 mmem->set_memory_at(alias_idx, result); 4211 } 4212 } else if (result->is_Phi() && 4213 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 4214 Node *un = result->as_Phi()->unique_input(igvn); 4215 if (un != nullptr) { 4216 orig_phis.append_if_missing(result->as_Phi()); 4217 result = un; 4218 } else { 4219 break; 4220 } 4221 } else if (result->is_ClearArray()) { 4222 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 4223 // Can not bypass initialization of the instance 4224 // we are looking for. 4225 break; 4226 } 4227 // Otherwise skip it (the call updated 'result' value). 4228 } else if (result->Opcode() == Op_SCMemProj) { 4229 Node* mem = result->in(0); 4230 Node* adr = nullptr; 4231 if (mem->is_LoadStore()) { 4232 adr = mem->in(MemNode::Address); 4233 } else { 4234 assert(mem->Opcode() == Op_EncodeISOArray || 4235 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 4236 adr = mem->in(3); // Memory edge corresponds to destination array 4237 } 4238 const Type *at = igvn->type(adr); 4239 if (at != Type::TOP) { 4240 assert(at->isa_ptr() != nullptr, "pointer type required."); 4241 int idx = C->get_alias_index(at->is_ptr()); 4242 if (idx == alias_idx) { 4243 // Assert in debug mode 4244 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 4245 break; // In product mode return SCMemProj node 4246 } 4247 } 4248 result = mem->in(MemNode::Memory); 4249 } else if (result->Opcode() == Op_StrInflatedCopy) { 4250 Node* adr = result->in(3); // Memory edge corresponds to destination array 4251 const Type *at = igvn->type(adr); 4252 if (at != Type::TOP) { 4253 assert(at->isa_ptr() != nullptr, "pointer type required."); 4254 int idx = C->get_alias_index(at->is_ptr()); 4255 if (idx == alias_idx) { 4256 // Assert in debug mode 4257 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 4258 break; // In product mode return SCMemProj node 4259 } 4260 } 4261 result = result->in(MemNode::Memory); 4262 } 4263 } 4264 if (result->is_Phi()) { 4265 PhiNode *mphi = result->as_Phi(); 4266 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 4267 const TypePtr *t = mphi->adr_type(); 4268 if (!is_instance) { 4269 // Push all non-instance Phis on the orig_phis worklist to update inputs 4270 // during Phase 4 if needed. 4271 orig_phis.append_if_missing(mphi); 4272 } else if (C->get_alias_index(t) != alias_idx) { 4273 // Create a new Phi with the specified alias index type. 4274 result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1); 4275 } 4276 } 4277 // the result is either MemNode, PhiNode, InitializeNode. 4278 return result; 4279 } 4280 4281 // 4282 // Convert the types of non-escaped object to instance types where possible, 4283 // propagate the new type information through the graph, and update memory 4284 // edges and MergeMem inputs to reflect the new type. 4285 // 4286 // We start with allocations (and calls which may be allocations) on alloc_worklist. 4287 // The processing is done in 4 phases: 4288 // 4289 // Phase 1: Process possible allocations from alloc_worklist. Create instance 4290 // types for the CheckCastPP for allocations where possible. 
4291 // Propagate the new types through users as follows:
4292 // casts and Phi: push users on alloc_worklist
4293 // AddP: cast Base and Address inputs to the instance type
4294 // push any AddP users on alloc_worklist and push any memnode
4295 // users onto memnode_worklist.
4296 // Phase 2: Process MemNodes from memnode_worklist. Compute a new address type and
4297 // search the Memory chain for a store with the appropriate
4298 // address type. If a Phi is found, create a new version with
4299 // the appropriate memory slices from each of the Phi inputs.
4300 // For stores, process the users as follows:
4301 // MemNode: push on memnode_worklist
4302 // MergeMem: push on mergemem_worklist
4303 // Phase 3: Process MergeMem nodes from mergemem_worklist. Walk each memory slice
4304 // moving the first node encountered of each instance type to the
4305 // input corresponding to its alias index (the appropriate memory
4306 // slice).
4307 // Phase 4: Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4308 //
4309 // In the following example, the CheckCastPP nodes are the cast of allocation
4310 // results and the allocation of node 29 is non-escaped and eligible to be an
4311 // instance type.
4312 //
4313 // We start with:
4314 //
4315 // 7 Parm #memory
4316 // 10 ConI "12"
4317 // 19 CheckCastPP "Foo"
4318 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4319 // 29 CheckCastPP "Foo"
4320 // 30 AddP _ 29 29 10 Foo+12 alias_index=4
4321 //
4322 // 40 StoreP 25 7 20 ... alias_index=4
4323 // 50 StoreP 35 40 30 ... alias_index=4
4324 // 60 StoreP 45 50 20 ... alias_index=4
4325 // 70 LoadP _ 60 30 ... alias_index=4
4326 // 80 Phi 75 50 60 Memory alias_index=4
4327 // 90 LoadP _ 80 30 ... alias_index=4
4328 // 100 LoadP _ 80 20 ... alias_index=4
4329 //
4330 //
4331 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4332 // and creating a new alias index for node 30. This gives:
4333 //
4334 // 7 Parm #memory
4335 // 10 ConI "12"
4336 // 19 CheckCastPP "Foo"
4337 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4338 // 29 CheckCastPP "Foo" iid=24
4339 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4340 //
4341 // 40 StoreP 25 7 20 ... alias_index=4
4342 // 50 StoreP 35 40 30 ... alias_index=6
4343 // 60 StoreP 45 50 20 ... alias_index=4
4344 // 70 LoadP _ 60 30 ... alias_index=6
4345 // 80 Phi 75 50 60 Memory alias_index=4
4346 // 90 LoadP _ 80 30 ... alias_index=6
4347 // 100 LoadP _ 80 20 ... alias_index=4
4348 //
4349 // In phase 2, new memory inputs are computed for the loads and stores,
4350 // and a new version of the phi is created. In phase 4, the inputs to
4351 // node 80 are updated and then the memory nodes are updated with the
4352 // values computed in phase 2. This results in:
4353 //
4354 // 7 Parm #memory
4355 // 10 ConI "12"
4356 // 19 CheckCastPP "Foo"
4357 // 20 AddP _ 19 19 10 Foo+12 alias_index=4
4358 // 29 CheckCastPP "Foo" iid=24
4359 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
4360 //
4361 // 40 StoreP 25 7 20 ... alias_index=4
4362 // 50 StoreP 35 7 30 ... alias_index=6
4363 // 60 StoreP 45 40 20 ... alias_index=4
4364 // 70 LoadP _ 50 30 ... alias_index=6
4365 // 80 Phi 75 40 60 Memory alias_index=4
4366 // 120 Phi 75 50 50 Memory alias_index=6
4367 // 90 LoadP _ 120 30 ... alias_index=6
4368 // 100 LoadP _ 80 20 ...
alias_index=4 4369 // 4370 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, 4371 GrowableArray<ArrayCopyNode*> &arraycopy_worklist, 4372 GrowableArray<MergeMemNode*> &mergemem_worklist, 4373 Unique_Node_List &reducible_merges) { 4374 DEBUG_ONLY(Unique_Node_List reduced_merges;) 4375 GrowableArray<Node *> memnode_worklist; 4376 GrowableArray<PhiNode *> orig_phis; 4377 PhaseIterGVN *igvn = _igvn; 4378 uint new_index_start = (uint) _compile->num_alias_types(); 4379 VectorSet visited; 4380 ideal_nodes.clear(); // Reset for use with set_map/get_map. 4381 uint unique_old = _compile->unique(); 4382 4383 // Phase 1: Process possible allocations from alloc_worklist. 4384 // Create instance types for the CheckCastPP for allocations where possible. 4385 // 4386 // (Note: don't forget to change the order of the second AddP node on 4387 // the alloc_worklist if the order of the worklist processing is changed, 4388 // see the comment in find_second_addp().) 4389 // 4390 while (alloc_worklist.length() != 0) { 4391 Node *n = alloc_worklist.pop(); 4392 uint ni = n->_idx; 4393 if (n->is_Call()) { 4394 CallNode *alloc = n->as_Call(); 4395 // copy escape information to call node 4396 PointsToNode* ptn = ptnode_adr(alloc->_idx); 4397 PointsToNode::EscapeState es = ptn->escape_state(); 4398 // We have an allocation or call which returns a Java object, 4399 // see if it is non-escaped. 4400 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) { 4401 continue; 4402 } 4403 // Find CheckCastPP for the allocate or for the return value of a call 4404 n = alloc->result_cast(); 4405 if (n == nullptr) { // No uses except Initialize node 4406 if (alloc->is_Allocate()) { 4407 // Set the scalar_replaceable flag for allocation 4408 // so it could be eliminated if it has no uses. 4409 alloc->as_Allocate()->_is_scalar_replaceable = true; 4410 } 4411 continue; 4412 } 4413 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 4414 // we could reach here for allocate case if one init is associated with many allocs. 4415 if (alloc->is_Allocate()) { 4416 alloc->as_Allocate()->_is_scalar_replaceable = false; 4417 } 4418 continue; 4419 } 4420 4421 // The inline code for Object.clone() casts the allocation result to 4422 // java.lang.Object and then to the actual type of the allocated 4423 // object. Detect this case and use the second cast. 4424 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 4425 // the allocation result is cast to java.lang.Object and then 4426 // to the actual Array type. 4427 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 4428 && (alloc->is_AllocateArray() || 4429 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) { 4430 Node *cast2 = nullptr; 4431 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 4432 Node *use = n->fast_out(i); 4433 if (use->is_CheckCastPP()) { 4434 cast2 = use; 4435 break; 4436 } 4437 } 4438 if (cast2 != nullptr) { 4439 n = cast2; 4440 } else { 4441 // Non-scalar replaceable if the allocation type is unknown statically 4442 // (reflection allocation), the object can't be restored during 4443 // deoptimization without precise type. 
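// (Illustrative, hypothetical Java) the reflective pattern referred to above:
//
//   Object o = Array.newInstance(k, 10);  // element klass 'k' known only at run time
//
// There is no second CheckCastPP to a concrete array type, so the exact
// allocated type is unknown and the object cannot be rematerialized at a
// deoptimization point.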
4444 continue;
4445 }
4446 }
4447
4448 const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4449 if (t == nullptr) {
4450 continue; // not a TypeOopPtr
4451 }
4452 if (!t->klass_is_exact()) {
4453 continue; // not a unique type
4454 }
4455 if (alloc->is_Allocate()) {
4456 // Set the scalar_replaceable flag for the allocation
4457 // so it can be eliminated.
4458 alloc->as_Allocate()->_is_scalar_replaceable = true;
4459 }
4460 set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4461 // In order for an object to be scalar-replaceable, it must be:
4462 // - a direct allocation (not a call returning an object)
4463 // - non-escaping
4464 // - eligible to be a unique type
4465 // - not determined to be ineligible by escape analysis
4466 set_map(alloc, n);
4467 set_map(n, alloc);
4468 const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4469 igvn->hash_delete(n);
4470 igvn->set_type(n, tinst);
4471 n->raise_bottom_type(tinst);
4472 igvn->hash_insert(n);
4473 record_for_optimizer(n);
4474 // Allocate an alias index for the header fields. Accesses to
4475 // the header emitted during macro expansion wouldn't have
4476 // correct memory state otherwise.
4477 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4478 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4479 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4480
4481 // First, put on the worklist all Field edges from the Connection Graph,
4482 // which is more accurate than putting immediate users from the Ideal Graph.
4483 for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4484 PointsToNode* tgt = e.get();
4485 if (tgt->is_Arraycopy()) {
4486 continue;
4487 }
4488 Node* use = tgt->ideal_node();
4489 assert(tgt->is_Field() && use->is_AddP(),
4490 "only AddP nodes are Field edges in CG");
4491 if (use->outcnt() > 0) { // Don't process dead nodes
4492 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4493 if (addp2 != nullptr) {
4494 assert(alloc->is_AllocateArray(),"array allocation was expected");
4495 alloc_worklist.append_if_missing(addp2);
4496 }
4497 alloc_worklist.append_if_missing(use);
4498 }
4499 }
4500
4501 // An allocation may have an Initialize which has raw stores. Scan
4502 // the users of the raw allocation result and push AddP users
4503 // on alloc_worklist.
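// (Illustrative note) Initializing stores captured by the Initialize node
// address the object through the raw oop projection (TypeFunc::Parms) of the
// allocation rather than through its CheckCastPP, so the AddPs feeding them
// are only reachable from the raw result scanned below.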
        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != nullptr, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != nullptr) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
        // This AddP will go away when we reduce the Phi.
        continue;
      }
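      // For an AddP whose base is a unique non-escaping allocation, split_AddP()
      // (below) retypes the address so it lands in the allocation's own alias
      // slice. An illustrative ideal-graph fragment (node numbers and the field
      // offset are hypothetical):
      //
      //   25 CheckCastPP  MyClass:NotNull:exact *,iid=25
      //   30 AddP  base=25 offset=#12     // address of some field of MyClass
      //
      // After splitting, memory accesses through node 30 use the alias index
      // of the iid=25 instance instead of the generic MyClass+12 slice.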
      Node* addp_base = get_addp_base(n);
      JavaObjectNode* jobj = unique_java_object(addp_base);
      if (jobj == nullptr || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
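      // A "reducible merge" is a Phi of otherwise scalar-replaceable
      // allocations, e.g. (a Java-level sketch, hypothetical code):
      //
      //   Point p = cond ? new Point(1, 2) : new Point(3, 4);
      //   int res = p.x;
      //
      // Instead of letting the merge point force both allocations to stay
      // allocated, reduce_phi() rewrites the Phi's users so that each input
      // can still be split out (see ReduceAllocationMerges).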
      // Reducible Phi's will be removed from the graph after split_unique_types
      // finishes. For now we just try to split out the SR inputs of the merge.
      Node* parent = n->in(1);
      if (reducible_merges.member(n)) {
        reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
#ifdef ASSERT
        if (VerifyReduceAllocationMerges) {
          reduced_merges.push(n);
        }
#endif
        continue;
      } else if (reducible_merges.member(parent)) {
        // 'n' is a user of a reducible merge (a Phi). It will be simplified as
        // part of reduce_merge.
        continue;
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == nullptr || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != nullptr && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
          if (tn_t->isa_aryptr()) {
            // Keep array properties (not flat/null-free)
            tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
            if (tinst == nullptr) {
              continue; // Skip dead path with inconsistent properties
            }
          }
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // Push the allocation's users on the appropriate worklist.
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != nullptr) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->Opcode() == Op_Return) {
        // Allocation is referenced by a field of the returned inline type
        assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X ||
                     op == Op_FastLock || op == Op_AryEq ||
                     op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_VectorizedHashCode ||
                     op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }
  }

#ifdef ASSERT
  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* phi = reducible_merges.at(i);

      if (!reduced_merges.member(phi)) {
        phi->dump(2);
        phi->dump(-2);
        assert(false, "This reducible merge wasn't reduced.");
      }

      // At this point reducible Phis shouldn't have AddP users anymore;
      // only SafePoints or Casts.
      for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
        Node* use = phi->fast_out(j);
        if (!use->is_SafePoint() && !use->is_CastPP()) {
          phi->dump(2);
          phi->dump(-2);
          assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
        }
      }
    }
  }
#endif

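  // A hedged sketch of why the loop below annotates ArrayCopy nodes: for
  //
  //   int[] a = new int[8];              // unique, non-escaping array
  //   System.arraycopy(a, 0, b, 0, 8);
  //
  // recording a's new instance type in the ArrayCopy's _src_type tells later
  // phases exactly which memory slice the copy reads, rather than the generic
  // int[] slice (and likewise _dest_type for what it writes).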
  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != nullptr) {
      Node *base = get_map(jobj->idx());
      if (base != nullptr) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != nullptr) {
      Node* base = get_map(jobj->idx());
      if (base != nullptr) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();

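  // Phase 2 below leans on find_inst_mem(), which walks a memory chain looking
  // for the most recent memory state of a given (instance) alias index. A
  // hedged sketch of the intended effect on a load from a split instance:
  //
  //   before:  LoadI mem=<generic int-field slice>    addr=AddP(base iid=25)
  //   after:   LoadI mem=<producer on the iid=25 slice>, same address
  //
  // Note that the new memory inputs are only recorded via set_map() here;
  // they are installed into the graph in Phase 4.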
  // Phase 2: Process MemNode's from memnode_worklist. Compute a new address
  //          type and new values for the Memory inputs (the Memory inputs
  //          are not actually updated until Phase 4).
  if (memnode_worklist.length() == 0) {
    return; // nothing to do
  }
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->is_CallLeaf()) {
      // Runtime calls with narrow memory input (no MergeMem node):
      // get the memory projection.
      n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
               strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
      n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP) {
        continue;
      }
      assert(addr_t->isa_ptr() != nullptr, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue; // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push users on the appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar() || use->is_CallLeaf()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
                 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
        // store_unknown_inline overwrites destination array
        memnode_worklist.append_if_missing(use);
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

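  // A hedged sketch of Phase 3's effect on a MergeMem (slice numbers are
  // hypothetical): if alias index 9 was created for a field of the unique
  // instance A, then
  //
  //   before:  MergeMem { ...; 4: StoreI A.f -> StoreI B.f -> ... }
  //   after:   MergeMem { ...; 4: StoreI B.f -> ...; 9: StoreI A.f }
  //
  // i.e. the store to A's field now feeds only A's own slice, so the generic
  // slice no longer sees it.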
  // Phase 3: Process MergeMem nodes from mergemem_worklist.
  //          Walk each memory slice moving the first node encountered of each
  //          instance type to the input corresponding to its alias index.
  uint length = mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: a MergeMem may already contain instance memory slices added
    // during the find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = nullptr;
      if (mem == nullptr || mem->is_top()) {
        continue;
      }
      // First, update the mergemem by moving memory nodes to the corresponding
      // slices if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != nullptr, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == nullptr) {
              cur = mem;
            }
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
      // Find any instance of the current type if we haven't already encountered
      // a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the memory values for the remaining instances.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through the general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }

    // If we have crossed the 3/4 point of the max node limit it's too risky
    // to continue with EA/SR because we might hit the max node limit.
    if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
      if (_compile->do_reduce_allocation_merges()) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
      } else if (_invocation > 0) {
        _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
      } else {
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
      }
      return;
    }

    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

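  // A short hedged note: Phase 2 above only computed new memory states for
  // loads and stores and remembered them via set_map(); the nodes themselves
  // still hold their old memory edges. Phase 4 below performs the actual
  // set_req(MemNode::Memory, ...) updates, now that Phase 3 has rebuilt the
  // MergeMem slices that those updates depend on.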
  // Phase 4: Update the inputs of non-instance memory Phis and
  //          the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi. Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the values we computed in
  // Phase 2 and move stores' memory users to the corresponding memory slices.
  // The memory split verification code is disabled until the fix for 6984348:
  // currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node* n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != nullptr, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
int ConnectionGraph::_no_escape_counter = 0;
int ConnectionGraph::_arg_escape_counter = 0;
int ConnectionGraph::_global_escape_counter = 0;

static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump_header(bool print_state, outputStream* out) const {
  NodeType nt = node_type();
  out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
      out->print("NSR ");
    }
  }
}

void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
  dump_header(print_state, out);
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop()) {
      out->print("oop ");
    }
    if (f->offset() > 0) {
      out->print("+%d ", f->offset());
    }
    out->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      out->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    out->print(" )");
  }
  out->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    out->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  out->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  out->print(" ]]  ");
  if (_node == nullptr) {
    out->print("<null>%s", newline ? "\n" : "");
  } else {
    _node->dump(newline ? "\n" : "", false, out);
  }
}

void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode *ptn = ptnodes_worklist.at(i);
    if (ptn == nullptr || !ptn->is_JavaObject()) {
      continue;
    }
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
                      _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}

void ConnectionGraph::print_statistics() {
  tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter));
}

void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
  if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
    return;
  }
  for (int next = 0; next < java_objects_worklist.length(); ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    if (ptn->ideal_node()->is_Allocate()) {
      if (ptn->escape_state() == PointsToNode::NoEscape) {
        Atomic::inc(&ConnectionGraph::_no_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
        Atomic::inc(&ConnectionGraph::_arg_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
        Atomic::inc(&ConnectionGraph::_global_escape_counter);
      } else {
        assert(false, "Unexpected Escape State");
      }
    }
  }
}

void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    assert(ptn != nullptr, "should not be null");
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}

const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif

void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}