/*
 * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
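  // Illustrative note (not from the original sources): a method such as
  //   Point p = new Point(x, y); return p.x + p.y;
  // leaves an Allocate macro node behind, so the scan below reports it as
  // an EA candidate; a method with no allocations, locks, or boxing calls
  // is skipped entirely.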
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != NULL) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL); // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and Java static calls are interesting.
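          // (A boxing call such as Integer.valueOf(int) is a CallStaticJava
          // whose result may also be non-escaping; illustrative note, not
          // from the original sources.)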
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect pointer compare nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i); // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for processing Call nodes,
  // calls into CI to resolve symbols (types, fields, methods) referenced in
  // bytecode. During symbol resolution the VM may throw an exception which
  // CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
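  //    (The propagation runs to a fixed point: edges are added until no new
  //    ones appear, or the iteration/time limits checked inside
  //    complete_connection_graph() are hit. Descriptive note, not from the
  //    original sources.)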
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or we hit the time or iteration limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
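    // (Each such object gets its own alias classes, so later passes can
    // treat its fields as disjoint memory slices; descriptive note, not
    // from the original sources.)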
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // Annotate safepoints if they have <= ArgEscape objects in their scope, and
  // Java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // JVMTI agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // JVMTI agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != NULL && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != NULL) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != NULL, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          return true;
        }
      }
    }
  }
  return false;
}


// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
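        // Illustrative example (not from the original sources):
        //   Object id(Object o) { return o; }
        // The returned oop becomes reachable by the caller, hence the
        // conservative GlobalEscape state here.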
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during first iteration because some could be
      // not defined yet, defer to final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue; // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
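      // (A load is modeled as a LocalVar with an edge to the points-to node
      // of its address, so it points to whatever was stored there;
      // descriptive note, not from the original sources.)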
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue; // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
             n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
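      // (Illustrative note, not from the original sources: JMH-style
      // blackholes compile to this node; marking the arguments GlobalEscape
      // keeps their allocations from being eliminated.)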
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == NULL) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == NULL) {
    n->dump(1);
    assert(adr_type != NULL, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
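    // (If the raw address was derived from a tracked oop field, the stored
    // value is recorded there as well; descriptive note, not from the
    // original sources.)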
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to an ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
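    // Illustrative example (not from the original sources): for a call like
    //   sb.append('x')   // StringBuilder.append returns its receiver
    // the bytecode analysis below can prove an argument is returned, so the
    // call maps to an ArgEscape LocalVar pointing to that argument.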
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
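      // (Passing a pointer into a leaf stub pins the object at ArgEscape:
      // the callee does not publish it, but the object can no longer be
      // scalar replaced. Descriptive note, not from the original sources.)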
      const TypeTuple* d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue;
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when the other is a basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only the escape state of the destination object's fields
              // affects the escape state of fields in the source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument escapes globally.
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Also returns an unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call; assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008
  // compiler.compiler. Set the limit to 20 to catch situations where
  // something went wrong and we need to bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define GRAPH_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer build_time;
  build_time.start();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
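      // (Cost note, not from the original sources: this loop dominates EA
      // build time, which is why it is sampled below and bounded by both
      // GRAPH_BUILD_ITER_LIMIT and EscapeAnalysisTimeout.)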
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  build_time.stop();
  _build_time = build_time.seconds();
  _build_iterations = iterations;

  // Bailout if passed limits.
  if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
           _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }

#undef GRAPH_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_allocs_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to the NULL object does not change escape states
        // since it does not escape.
        // Also no fields are added to the NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
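  // (Iterating backwards keeps delete_at() from disturbing the indices of
  // entries not yet visited; descriptive note, not from the original sources.)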
  for (int next = non_escaped_allocs_worklist.length()-1; next >= 0; --next) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_allocs_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values_phantom(ptn);
    }
  }
  return (non_escaped_allocs_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy()) {
        continue;
      }
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) { // NULL object does not have field edges
        continue;
      }
      // Add edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj)) {
      continue; // No new edge added, there was such edge already.
    }
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) { // NULL object does not have field edges
              continue;
            }
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // A new edge was added to the values stored in the field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
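  // (Fields with the same base and offset may alias, so a value stored
  // through one Field node must become visible to loads modeled by the
  // others; descriptive note, not from the original sources.)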
1615   for (BaseIterator i(field); i.has_next(); i.next()) {
1616     PointsToNode* base = i.get();
1617     add_fields_to_worklist(field, base);
1618     // Check if the base was the source object of an arraycopy and, if so, go over
1619     // the arraycopy's destination objects, since values stored to a field of the
1620     // source object are accessible by uses (loads) of fields of the destination objects.
1621     if (base->arraycopy_src()) {
1622       for (UseIterator j(base); j.has_next(); j.next()) {
1623         PointsToNode* arycp = j.get();
1624         if (arycp->is_Arraycopy()) {
1625           for (UseIterator k(arycp); k.has_next(); k.next()) {
1626             PointsToNode* abase = k.get();
1627             if (abase->arraycopy_dst() && abase != base) {
1628               // Look for the same arraycopy reference.
1629               add_fields_to_worklist(field, abase);
1630             }
1631           }
1632         }
1633       }
1634     }
1635   }
1636 }
1637
1638 // Put on worklist all related field nodes.
1639 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
1640   int offset = field->offset();
1641   if (base->is_LocalVar()) {
1642     for (UseIterator j(base); j.has_next(); j.next()) {
1643       PointsToNode* f = j.get();
1644       if (PointsToNode::is_base_use(f)) { // Field
1645         f = PointsToNode::get_use_node(f);
1646         if (f == field || !f->as_Field()->is_oop()) {
1647           continue;
1648         }
1649         int offs = f->as_Field()->offset();
1650         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1651           add_to_worklist(f);
1652         }
1653       }
1654     }
1655   } else {
1656     assert(base->is_JavaObject(), "sanity");
1657     if (// Skip phantom_object since it is only used to indicate that
1658         // this field's content globally escapes.
1659         (base != phantom_obj) &&
1660         // NULL object node does not have fields.
1661         (base != null_obj)) {
1662       for (EdgeIterator i(base); i.has_next(); i.next()) {
1663         PointsToNode* f = i.get();
1664         // Skip arraycopy edge since a store to the destination object's field
1665         // does not update the value in the source object's field.
1666         if (f->is_Arraycopy()) {
1667           assert(base->arraycopy_dst(), "sanity");
1668           continue;
1669         }
1670         if (f == field || !f->as_Field()->is_oop()) {
1671           continue;
1672         }
1673         int offs = f->as_Field()->offset();
1674         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
1675           add_to_worklist(f);
1676         }
1677       }
1678     }
1679   }
1680 }
1681
1682 // Find fields which have an unknown value.
1683 int ConnectionGraph::find_field_value(FieldNode* field) {
1684   // Escaped fields should have an init value already.
1685   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
1686   int new_edges = 0;
1687   for (BaseIterator i(field); i.has_next(); i.next()) {
1688     PointsToNode* base = i.get();
1689     if (base->is_JavaObject()) {
1690       // Skip Allocate's fields which will be processed later.
1691       if (base->ideal_node()->is_Allocate()) {
1692         return 0;
1693       }
1694       assert(base == null_obj, "only NULL ptr base expected here");
1695     }
1696   }
1697   if (add_edge(field, phantom_obj)) {
1698     // New edge was added
1699     new_edges++;
1700     add_field_uses_to_worklist(field);
1701   }
1702   return new_edges;
1703 }
1704
1705 // Find fields' initializing values for allocations.
1706 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
1707   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1708   Node* alloc = pta->ideal_node();
1709
1710   // Do nothing for Allocate nodes since their field values are
1711   // "known" unless they are initialized by arraycopy/clone.
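  //
  // For example (an illustrative sketch, not from the original source):
  //
  //   Point p = (Point) other.clone();
  //
  // 'p' may be a non-escaping Allocate, but its fields are filled in by the
  // clone intrinsic's arraycopy rather than by recorded initializing stores,
  // so their values must be treated as unknown (phantom_obj).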
1712   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
1713     return 0;
1714   }
1715   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
1716 #ifdef ASSERT
1717   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
1718     const char* name = alloc->as_CallStaticJava()->_name;
1719     assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
1720   }
1721 #endif
1722   // Non-escaped allocations returned from Java or runtime calls have unknown values in fields.
1723   int new_edges = 0;
1724   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1725     PointsToNode* field = i.get();
1726     if (field->is_Field() && field->as_Field()->is_oop()) {
1727       if (add_edge(field, phantom_obj)) {
1728         // New edge was added
1729         new_edges++;
1730         add_field_uses_to_worklist(field->as_Field());
1731       }
1732     }
1733   }
1734   return new_edges;
1735 }
1736
1737 // Find fields' initializing values for allocations.
1738 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
1739   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
1740   Node* alloc = pta->ideal_node();
1741   // Do nothing for Call nodes since their field values are unknown.
1742   if (!alloc->is_Allocate()) {
1743     return 0;
1744   }
1745   InitializeNode* ini = alloc->as_Allocate()->initialization();
1746   bool visited_bottom_offset = false;
1747   GrowableArray<int> offsets_worklist;
1748   int new_edges = 0;
1749
1750   // Check if an oop field's initializing value is recorded and add
1751   // a corresponding NULL value if the field's value is not recorded.
1752   // Connection Graph does not record a default initialization by NULL
1753   // captured by Initialize node.
1754   //
1755   for (EdgeIterator i(pta); i.has_next(); i.next()) {
1756     PointsToNode* field = i.get(); // Field (AddP)
1757     if (!field->is_Field() || !field->as_Field()->is_oop()) {
1758       continue; // Not oop field
1759     }
1760     int offset = field->as_Field()->offset();
1761     if (offset == Type::OffsetBot) {
1762       if (!visited_bottom_offset) {
1763         // OffsetBot is used to reference an array's element;
1764         // always add a reference to NULL to all Field nodes since we don't
1765         // know which element is referenced.
1766         if (add_edge(field, null_obj)) {
1767           // New edge was added
1768           new_edges++;
1769           add_field_uses_to_worklist(field->as_Field());
1770           visited_bottom_offset = true;
1771         }
1772       }
1773     } else {
1774       // Check only oop fields.
1775       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
1776       if (adr_type->isa_rawptr()) {
1777 #ifdef ASSERT
1778         // Raw pointers are used for initializing stores so skip it
1779         // since it should be recorded already
1780         Node* base = get_addp_base(field->ideal_node());
1781         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
1782 #endif
1783         continue;
1784       }
1785       if (!offsets_worklist.contains(offset)) {
1786         offsets_worklist.append(offset);
1787         Node* value = NULL;
1788         if (ini != NULL) {
1789           // StoreP::memory_type() == T_ADDRESS
1790           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
1791           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
1792           // Make sure the initializing store has the same type as this AddP.
1793           // This AddP may reference a non-existing field because it is on a
1794           // dead branch of a bimorphic call which is not eliminated yet.
1795           if (store != NULL && store->is_Store() &&
1796               store->as_Store()->memory_type() == ft) {
1797             value = store->in(MemNode::ValueIn);
1798 #ifdef ASSERT
1799             if (VerifyConnectionGraph) {
1800               // Verify that AddP already points to all objects the value points to.
1801               PointsToNode* val = ptnode_adr(value->_idx);
1802               assert((val != NULL), "should be processed already");
1803               PointsToNode* missed_obj = NULL;
1804               if (val->is_JavaObject()) {
1805                 if (!field->points_to(val->as_JavaObject())) {
1806                   missed_obj = val;
1807                 }
1808               } else {
1809                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
1810                   tty->print_cr("----------init store has invalid value -----");
1811                   store->dump();
1812                   val->dump();
1813                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
1814                 }
1815                 for (EdgeIterator j(val); j.has_next(); j.next()) {
1816                   PointsToNode* obj = j.get();
1817                   if (obj->is_JavaObject()) {
1818                     if (!field->points_to(obj->as_JavaObject())) {
1819                       missed_obj = obj;
1820                       break;
1821                     }
1822                   }
1823                 }
1824               }
1825               if (missed_obj != NULL) {
1826                 tty->print_cr("----------field---------------------------------");
1827                 field->dump();
1828                 tty->print_cr("----------missed reference to object-----------");
1829                 missed_obj->dump();
1830                 tty->print_cr("----------object referenced by init store -----");
1831                 store->dump();
1832                 val->dump();
1833                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
1834               }
1835             }
1836 #endif
1837           } else {
1838             // There could be initializing stores which follow allocation.
1839             // For example, a volatile field store is not collected
1840             // by Initialize node.
1841             //
1842             // Need to check for dependent loads to separate such stores from
1843             // stores which follow loads. For now, add initial value NULL so
1844             // that compare pointers optimization works correctly.
1845           }
1846         }
1847         if (value == NULL) {
1848           // A field's initializing value was not recorded. Add NULL.
1849           if (add_edge(field, null_obj)) {
1850             // New edge was added
1851             new_edges++;
1852             add_field_uses_to_worklist(field->as_Field());
1853           }
1854         }
1855       }
1856     }
1857   }
1858   return new_edges;
1859 }
1860
1861 // Adjust scalar_replaceable state after Connection Graph is built.
1862 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
1863   // Search for non-escaping objects which are not scalar replaceable
1864   // and mark them to propagate the state to referenced objects.
1865
1866   for (UseIterator i(jobj); i.has_next(); i.next()) {
1867     PointsToNode* use = i.get();
1868     if (use->is_Arraycopy()) {
1869       continue;
1870     }
1871     if (use->is_Field()) {
1872       FieldNode* field = use->as_Field();
1873       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
1874       // 1. An object is not scalar replaceable if the field into which it is
1875       // stored has unknown offset (stored into unknown element of an array).
1876       if (field->offset() == Type::OffsetBot) {
1877         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
1878         return;
1879       }
1880       for (BaseIterator i(field); i.has_next(); i.next()) {
1881         PointsToNode* base = i.get();
1882         // 2. An object is not scalar replaceable if the field into which it is
1883         // stored has multiple bases, one of which is null.
1884         if ((base == null_obj) && (field->base_count() > 1)) {
1885           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
1886           return;
1887         }
1888         // 2.5. An object is not scalar replaceable if the field into which it is
1889         // stored has an NSR (not scalar replaceable) base.
1890         if (!base->scalar_replaceable()) {
1891           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
1892           return;
1893         }
1894       }
1895     }
1896     assert(use->is_Field() || use->is_LocalVar(), "sanity");
1897     // 3. An object is not scalar replaceable if it is merged with other objects.
1898     for (EdgeIterator j(use); j.has_next(); j.next()) {
1899       PointsToNode* ptn = j.get();
1900       if (ptn->is_JavaObject() && ptn != jobj) {
1901         // Mark all objects.
1902         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
1903         set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
1904       }
1905     }
1906     if (!jobj->scalar_replaceable()) {
1907       return;
1908     }
1909   }
1910
1911   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
1912     if (j.get()->is_Arraycopy()) {
1913       continue;
1914     }
1915
1916     // Non-escaping object node should point only to field nodes.
1917     FieldNode* field = j.get()->as_Field();
1918     int offset = field->as_Field()->offset();
1919
1920     // 4. An object is not scalar replaceable if it has a field with unknown
1921     // offset (array's element is accessed in loop).
1922     if (offset == Type::OffsetBot) {
1923       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
1924       return;
1925     }
1926     // 5. Currently an object is not scalar replaceable if a LoadStore node
1927     // accesses its field since the field's value is unknown after it.
1928     //
1929     Node* n = field->ideal_node();
1930
1931     // Test for an unsafe access that was parsed as maybe off heap
1932     // (with a CheckCastPP to raw memory).
1933     assert(n->is_AddP(), "expect an address computation");
1934     if (n->in(AddPNode::Base)->is_top() &&
1935         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
1936       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
1937       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
1938       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
1939       return;
1940     }
1941
1942     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1943       Node* u = n->fast_out(i);
1944       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
1945         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
1946         return;
1947       }
1948     }
1949
1950     // 6. Or the address may point to more than one object. This may produce
1951     // a false positive result (set not scalar replaceable)
1952     // since the flow-insensitive escape analysis can't separate
1953     // the case when stores overwrite the field's value from the case
1954     // when stores happened on different control branches.
1955     //
1956     // Note: it will disable scalar replacement in some cases:
1957     //
1958     //   Point p[] = new Point[1];
1959     //   p[0] = new Point(); // Will be not scalar replaced
1960     //
1961     // but it will save us from incorrect optimizations in the following cases:
1962     //
1963     //   Point p[] = new Point[1];
1964     //   if ( x ) p[0] = new Point(); // Will be not scalar replaced
1965     //
1966     if (field->base_count() > 1) {
1967       for (BaseIterator i(field); i.has_next(); i.next()) {
1968         PointsToNode* base = i.get();
1969         // Don't take into account LocalVar nodes which
1970         // may point to only one object, which should also be
1971         // this field's base by now.
1972 if (base->is_JavaObject() && base != jobj) { 1973 // Mark all bases. 1974 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object")); 1975 set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object")); 1976 } 1977 } 1978 } 1979 } 1980 } 1981 1982 // Propagate NSR (Not scalar replaceable) state. 1983 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) { 1984 int jobj_length = jobj_worklist.length(); 1985 bool found_nsr_alloc = true; 1986 while (found_nsr_alloc) { 1987 found_nsr_alloc = false; 1988 for (int next = 0; next < jobj_length; ++next) { 1989 JavaObjectNode* jobj = jobj_worklist.at(next); 1990 for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) { 1991 PointsToNode* use = i.get(); 1992 if (use->is_Field()) { 1993 FieldNode* field = use->as_Field(); 1994 assert(field->is_oop() && field->scalar_replaceable(), "sanity"); 1995 assert(field->offset() != Type::OffsetBot, "sanity"); 1996 for (BaseIterator i(field); i.has_next(); i.next()) { 1997 PointsToNode* base = i.get(); 1998 // An object is not scalar replaceable if the field into which 1999 // it is stored has NSR base. 2000 if ((base != null_obj) && !base->scalar_replaceable()) { 2001 set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base")); 2002 found_nsr_alloc = true; 2003 break; 2004 } 2005 } 2006 } 2007 } 2008 } 2009 } 2010 } 2011 2012 #ifdef ASSERT 2013 void ConnectionGraph::verify_connection_graph( 2014 GrowableArray<PointsToNode*>& ptnodes_worklist, 2015 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist, 2016 GrowableArray<JavaObjectNode*>& java_objects_worklist, 2017 GrowableArray<Node*>& addp_worklist) { 2018 // Verify that graph is complete - no new edges could be added. 2019 int java_objects_length = java_objects_worklist.length(); 2020 int non_escaped_length = non_escaped_allocs_worklist.length(); 2021 int new_edges = 0; 2022 for (int next = 0; next < java_objects_length; ++next) { 2023 JavaObjectNode* ptn = java_objects_worklist.at(next); 2024 new_edges += add_java_object_edges(ptn, true); 2025 } 2026 assert(new_edges == 0, "graph was not complete"); 2027 // Verify that escape state is final. 2028 int length = non_escaped_allocs_worklist.length(); 2029 find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist); 2030 assert((non_escaped_length == non_escaped_allocs_worklist.length()) && 2031 (non_escaped_length == length) && 2032 (_worklist.length() == 0), "escape state was not final"); 2033 2034 // Verify fields information. 2035 int addp_length = addp_worklist.length(); 2036 for (int next = 0; next < addp_length; ++next ) { 2037 Node* n = addp_worklist.at(next); 2038 FieldNode* field = ptnode_adr(n->_idx)->as_Field(); 2039 if (field->is_oop()) { 2040 // Verify that field has all bases 2041 Node* base = get_addp_base(n); 2042 PointsToNode* ptn = ptnode_adr(base->_idx); 2043 if (ptn->is_JavaObject()) { 2044 assert(field->has_base(ptn->as_JavaObject()), "sanity"); 2045 } else { 2046 assert(ptn->is_LocalVar(), "sanity"); 2047 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2048 PointsToNode* e = i.get(); 2049 if (e->is_JavaObject()) { 2050 assert(field->has_base(e->as_JavaObject()), "sanity"); 2051 } 2052 } 2053 } 2054 // Verify that all fields have initializing values. 
2055 if (field->edge_count() == 0) { 2056 tty->print_cr("----------field does not have references----------"); 2057 field->dump(); 2058 for (BaseIterator i(field); i.has_next(); i.next()) { 2059 PointsToNode* base = i.get(); 2060 tty->print_cr("----------field has next base---------------------"); 2061 base->dump(); 2062 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 2063 tty->print_cr("----------base has fields-------------------------"); 2064 for (EdgeIterator j(base); j.has_next(); j.next()) { 2065 j.get()->dump(); 2066 } 2067 tty->print_cr("----------base has references---------------------"); 2068 for (UseIterator j(base); j.has_next(); j.next()) { 2069 j.get()->dump(); 2070 } 2071 } 2072 } 2073 for (UseIterator i(field); i.has_next(); i.next()) { 2074 i.get()->dump(); 2075 } 2076 assert(field->edge_count() > 0, "sanity"); 2077 } 2078 } 2079 } 2080 } 2081 #endif 2082 2083 // Optimize ideal graph. 2084 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 2085 GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) { 2086 Compile* C = _compile; 2087 PhaseIterGVN* igvn = _igvn; 2088 if (EliminateLocks) { 2089 // Mark locks before changing ideal graph. 2090 int cnt = C->macro_count(); 2091 for (int i = 0; i < cnt; i++) { 2092 Node *n = C->macro_node(i); 2093 if (n->is_AbstractLock()) { // Lock and Unlock nodes 2094 AbstractLockNode* alock = n->as_AbstractLock(); 2095 if (!alock->is_non_esc_obj()) { 2096 if (not_global_escape(alock->obj_node())) { 2097 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 2098 // The lock could be marked eliminated by lock coarsening 2099 // code during first IGVN before EA. Replace coarsened flag 2100 // to eliminate all associated locks/unlocks. 2101 #ifdef ASSERT 2102 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 2103 #endif 2104 alock->set_non_esc_obj(); 2105 } 2106 } 2107 } 2108 } 2109 } 2110 2111 if (OptimizePtrCompare) { 2112 for (int i = 0; i < ptr_cmp_worklist.length(); i++) { 2113 Node *n = ptr_cmp_worklist.at(i); 2114 const TypeInt* tcmp = optimize_ptr_compare(n); 2115 if (tcmp->singleton()) { 2116 Node* cmp = igvn->makecon(tcmp); 2117 #ifndef PRODUCT 2118 if (PrintOptimizePtrCompare) { 2119 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ")); 2120 if (Verbose) { 2121 n->dump(1); 2122 } 2123 } 2124 #endif 2125 igvn->replace_node(n, cmp); 2126 } 2127 } 2128 } 2129 2130 // For MemBarStoreStore nodes added in library_call.cpp, check 2131 // escape status of associated AllocateNode and optimize out 2132 // MemBarStoreStore node if the allocated object never escapes. 2133 for (int i = 0; i < storestore_worklist.length(); i++) { 2134 Node* storestore = storestore_worklist.at(i); 2135 Node* alloc = storestore->in(MemBarNode::Precedent)->in(0); 2136 if (alloc->is_Allocate() && not_global_escape(alloc)) { 2137 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 2138 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 2139 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 2140 igvn->register_new_node_with_optimizer(mb); 2141 igvn->replace_node(storestore, mb); 2142 } 2143 } 2144 } 2145 2146 // Optimize objects compare. 
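// For example (an illustrative sketch, not from the original source): a
// comparison between a fresh non-escaping allocation and an unrelated
// pointer can be constant-folded, since no other reference can alias it:
//
//   Object o = new Object();   // NoEscape allocation
//   if (o == param) { ... }    // always false, folded to NE by the code below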
2147 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { 2148 assert(OptimizePtrCompare, "sanity"); 2149 assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be"); 2150 const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO 2151 const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE 2152 const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0,1] 2153 2154 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 2155 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 2156 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 2157 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 2158 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 2159 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 2160 2161 // Check simple cases first. 2162 if (jobj1 != NULL) { 2163 if (jobj1->escape_state() == PointsToNode::NoEscape) { 2164 if (jobj1 == jobj2) { 2165 // Comparing the same not escaping object. 2166 return EQ; 2167 } 2168 Node* obj = jobj1->ideal_node(); 2169 // Comparing not escaping allocation. 2170 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2171 !ptn2->points_to(jobj1)) { 2172 return NE; // This includes nullness check. 2173 } 2174 } 2175 } 2176 if (jobj2 != NULL) { 2177 if (jobj2->escape_state() == PointsToNode::NoEscape) { 2178 Node* obj = jobj2->ideal_node(); 2179 // Comparing not escaping allocation. 2180 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 2181 !ptn1->points_to(jobj2)) { 2182 return NE; // This includes nullness check. 2183 } 2184 } 2185 } 2186 if (jobj1 != NULL && jobj1 != phantom_obj && 2187 jobj2 != NULL && jobj2 != phantom_obj && 2188 jobj1->ideal_node()->is_Con() && 2189 jobj2->ideal_node()->is_Con()) { 2190 // Klass or String constants compare. Need to be careful with 2191 // compressed pointers - compare types of ConN and ConP instead of nodes. 2192 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 2193 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 2194 if (t1->make_ptr() == t2->make_ptr()) { 2195 return EQ; 2196 } else { 2197 return NE; 2198 } 2199 } 2200 if (ptn1->meet(ptn2)) { 2201 return UNKNOWN; // Sets are not disjoint 2202 } 2203 2204 // Sets are disjoint. 2205 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 2206 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 2207 bool set1_has_null_ptr = ptn1->points_to(null_obj); 2208 bool set2_has_null_ptr = ptn2->points_to(null_obj); 2209 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 2210 (set2_has_unknown_ptr && set1_has_null_ptr)) { 2211 // Check nullness of unknown object. 2212 return UNKNOWN; 2213 } 2214 2215 // Disjointness by itself is not sufficient since 2216 // alias analysis is not complete for escaped objects. 2217 // Disjoint sets are definitely unrelated only when 2218 // at least one set has only not escaping allocations. 2219 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 2220 if (ptn1->non_escaping_allocation()) { 2221 return NE; 2222 } 2223 } 2224 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2225 if (ptn2->non_escaping_allocation()) { 2226 return NE; 2227 } 2228 } 2229 return UNKNOWN; 2230 } 2231 2232 // Connection Graph construction functions. 
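//
// The add_* factory methods below are idempotent: if a PointsTo node already
// exists for the ideal node, they only assert consistency and return.
// Typical uses elsewhere in this file (shown as a hedged sketch) look like:
//
//   add_local_var(n, PointsToNode::NoEscape);        // e.g. for a cast or Phi
//   add_java_object(n, PointsToNode::GlobalEscape);  // e.g. unknown call result
//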
2233 2234 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2235 PointsToNode* ptadr = _nodes.at(n->_idx); 2236 if (ptadr != NULL) { 2237 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2238 return; 2239 } 2240 Compile* C = _compile; 2241 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2242 map_ideal_node(n, ptadr); 2243 } 2244 2245 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2246 PointsToNode* ptadr = _nodes.at(n->_idx); 2247 if (ptadr != NULL) { 2248 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2249 return; 2250 } 2251 Compile* C = _compile; 2252 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2253 map_ideal_node(n, ptadr); 2254 } 2255 2256 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2257 PointsToNode* ptadr = _nodes.at(n->_idx); 2258 if (ptadr != NULL) { 2259 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2260 return; 2261 } 2262 bool unsafe = false; 2263 bool is_oop = is_oop_field(n, offset, &unsafe); 2264 if (unsafe) { 2265 es = PointsToNode::GlobalEscape; 2266 } 2267 Compile* C = _compile; 2268 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2269 map_ideal_node(n, field); 2270 } 2271 2272 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2273 PointsToNode* src, PointsToNode* dst) { 2274 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2275 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 2276 PointsToNode* ptadr = _nodes.at(n->_idx); 2277 if (ptadr != NULL) { 2278 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2279 return; 2280 } 2281 Compile* C = _compile; 2282 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2283 map_ideal_node(n, ptadr); 2284 // Add edge from arraycopy node to source object. 2285 (void)add_edge(ptadr, src); 2286 src->set_arraycopy_src(); 2287 // Add edge from destination object to arraycopy node. 2288 (void)add_edge(dst, ptadr); 2289 dst->set_arraycopy_dst(); 2290 } 2291 2292 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2293 const Type* adr_type = n->as_AddP()->bottom_type(); 2294 BasicType bt = T_INT; 2295 if (offset == Type::OffsetBot) { 2296 // Check only oop fields. 2297 if (!adr_type->isa_aryptr() || 2298 adr_type->isa_aryptr()->elem() == Type::BOTTOM || 2299 adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) { 2300 // OffsetBot is used to reference array's element. Ignore first AddP. 
2301 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 2302 bt = T_OBJECT; 2303 } 2304 } 2305 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2306 if (adr_type->isa_instptr()) { 2307 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2308 if (field != NULL) { 2309 bt = field->layout_type(); 2310 } else { 2311 // Check for unsafe oop field access 2312 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2313 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2314 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2315 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2316 bt = T_OBJECT; 2317 (*unsafe) = true; 2318 } 2319 } 2320 } else if (adr_type->isa_aryptr()) { 2321 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2322 // Ignore array length load. 2323 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2324 // Ignore first AddP. 2325 } else { 2326 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2327 bt = elemtype->array_element_basic_type(); 2328 } 2329 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2330 // Allocation initialization, ThreadLocal field access, unsafe access 2331 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2332 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2333 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2334 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2335 bt = T_OBJECT; 2336 } 2337 } 2338 } 2339 // Note: T_NARROWOOP is not classed as a real reference type 2340 return (is_reference_type(bt) || bt == T_NARROWOOP); 2341 } 2342 2343 // Returns unique pointed java object or NULL. 2344 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 2345 assert(!_collecting, "should not call when constructed graph"); 2346 // If the node was created after the escape computation we can't answer. 2347 uint idx = n->_idx; 2348 if (idx >= nodes_size()) { 2349 return NULL; 2350 } 2351 PointsToNode* ptn = ptnode_adr(idx); 2352 if (ptn == NULL) { 2353 return NULL; 2354 } 2355 if (ptn->is_JavaObject()) { 2356 return ptn->as_JavaObject(); 2357 } 2358 assert(ptn->is_LocalVar(), "sanity"); 2359 // Check all java objects it points to. 2360 JavaObjectNode* jobj = NULL; 2361 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2362 PointsToNode* e = i.get(); 2363 if (e->is_JavaObject()) { 2364 if (jobj == NULL) { 2365 jobj = e->as_JavaObject(); 2366 } else if (jobj != e) { 2367 return NULL; 2368 } 2369 } 2370 } 2371 return jobj; 2372 } 2373 2374 // Return true if this node points only to non-escaping allocations. 2375 bool PointsToNode::non_escaping_allocation() { 2376 if (is_JavaObject()) { 2377 Node* n = ideal_node(); 2378 if (n->is_Allocate() || n->is_CallStaticJava()) { 2379 return (escape_state() == PointsToNode::NoEscape); 2380 } else { 2381 return false; 2382 } 2383 } 2384 assert(is_LocalVar(), "sanity"); 2385 // Check all java objects it points to. 
2386   for (EdgeIterator i(this); i.has_next(); i.next()) {
2387     PointsToNode* e = i.get();
2388     if (e->is_JavaObject()) {
2389       Node* n = e->ideal_node();
2390       if ((e->escape_state() != PointsToNode::NoEscape) ||
2391           !(n->is_Allocate() || n->is_CallStaticJava())) {
2392         return false;
2393       }
2394     }
2395   }
2396   return true;
2397 }
2398
2399 // Return true if we know the node does not escape globally.
2400 bool ConnectionGraph::not_global_escape(Node *n) {
2401   assert(!_collecting, "should not call during graph construction");
2402   // If the node was created after the escape computation we can't answer.
2403   uint idx = n->_idx;
2404   if (idx >= nodes_size()) {
2405     return false;
2406   }
2407   PointsToNode* ptn = ptnode_adr(idx);
2408   if (ptn == NULL) {
2409     return false; // not in congraph (e.g. ConI)
2410   }
2411   PointsToNode::EscapeState es = ptn->escape_state();
2412   // If we have already computed a value, return it.
2413   if (es >= PointsToNode::GlobalEscape) {
2414     return false;
2415   }
2416   if (ptn->is_JavaObject()) {
2417     return true; // (es < PointsToNode::GlobalEscape);
2418   }
2419   assert(ptn->is_LocalVar(), "sanity");
2420   // Check all java objects it points to.
2421   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2422     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
2423       return false;
2424     }
2425   }
2426   return true;
2427 }
2428
2429
2430 // Helper functions
2431
2432 // Return true if this node is the specified java object or points to it.
2433 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
2434   if (is_JavaObject()) {
2435     return (this == ptn);
2436   }
2437   assert(is_LocalVar() || is_Field(), "sanity");
2438   for (EdgeIterator i(this); i.has_next(); i.next()) {
2439     if (i.get() == ptn) {
2440       return true;
2441     }
2442   }
2443   return false;
2444 }
2445
2446 // Return true if one node points to another (the points-to sets intersect).
2447 bool PointsToNode::meet(PointsToNode* ptn) {
2448   if (this == ptn) {
2449     return true;
2450   } else if (ptn->is_JavaObject()) {
2451     return this->points_to(ptn->as_JavaObject());
2452   } else if (this->is_JavaObject()) {
2453     return ptn->points_to(this->as_JavaObject());
2454   }
2455   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
2456   int ptn_count = ptn->edge_count();
2457   for (EdgeIterator i(this); i.has_next(); i.next()) {
2458     PointsToNode* this_e = i.get();
2459     for (int j = 0; j < ptn_count; j++) {
2460       if (this_e == ptn->edge(j)) {
2461         return true;
2462       }
2463     }
2464   }
2465   return false;
2466 }
2467
2468 #ifdef ASSERT
2469 // Return true if bases point to this java object.
2470 bool FieldNode::has_base(JavaObjectNode* jobj) const {
2471   for (BaseIterator i(this); i.has_next(); i.next()) {
2472     if (i.get() == jobj) {
2473       return true;
2474     }
2475   }
2476   return false;
2477 }
2478 #endif
2479
2480 bool ConnectionGraph::is_captured_store_address(Node* addp) {
2481   // Handle simple case first.
2482 assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access"); 2483 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 2484 return true; 2485 } else if (addp->in(AddPNode::Address)->is_Phi()) { 2486 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 2487 Node* addp_use = addp->fast_out(i); 2488 if (addp_use->is_Store()) { 2489 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 2490 if (addp_use->fast_out(j)->is_Initialize()) { 2491 return true; 2492 } 2493 } 2494 } 2495 } 2496 } 2497 return false; 2498 } 2499 2500 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2501 const Type *adr_type = phase->type(adr); 2502 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) { 2503 // We are computing a raw address for a store captured by an Initialize 2504 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2505 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2506 assert(offs != Type::OffsetBot || 2507 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2508 "offset must be a constant or it is initialization of array"); 2509 return offs; 2510 } 2511 const TypePtr *t_ptr = adr_type->isa_ptr(); 2512 assert(t_ptr != NULL, "must be a pointer type"); 2513 return t_ptr->offset(); 2514 } 2515 2516 Node* ConnectionGraph::get_addp_base(Node *addp) { 2517 assert(addp->is_AddP(), "must be AddP"); 2518 // 2519 // AddP cases for Base and Address inputs: 2520 // case #1. Direct object's field reference: 2521 // Allocate 2522 // | 2523 // Proj #5 ( oop result ) 2524 // | 2525 // CheckCastPP (cast to instance type) 2526 // | | 2527 // AddP ( base == address ) 2528 // 2529 // case #2. Indirect object's field reference: 2530 // Phi 2531 // | 2532 // CastPP (cast to instance type) 2533 // | | 2534 // AddP ( base == address ) 2535 // 2536 // case #3. Raw object's field reference for Initialize node: 2537 // Allocate 2538 // | 2539 // Proj #5 ( oop result ) 2540 // top | 2541 // \ | 2542 // AddP ( base == top ) 2543 // 2544 // case #4. Array's element reference: 2545 // {CheckCastPP | CastPP} 2546 // | | | 2547 // | AddP ( array's element offset ) 2548 // | | 2549 // AddP ( array's offset ) 2550 // 2551 // case #5. Raw object's field reference for arraycopy stub call: 2552 // The inline_native_clone() case when the arraycopy stub is called 2553 // after the allocation before Initialize and CheckCastPP nodes. 2554 // Allocate 2555 // | 2556 // Proj #5 ( oop result ) 2557 // | | 2558 // AddP ( base == address ) 2559 // 2560 // case #6. Constant Pool, ThreadLocal, CastX2P or 2561 // Raw object's field reference: 2562 // {ConP, ThreadLocal, CastX2P, raw Load} 2563 // top | 2564 // \ | 2565 // AddP ( base == top ) 2566 // 2567 // case #7. Klass's field reference. 2568 // LoadKlass 2569 // | | 2570 // AddP ( base == address ) 2571 // 2572 // case #8. narrow Klass's field reference. 2573 // LoadNKlass 2574 // | 2575 // DecodeN 2576 // | | 2577 // AddP ( base == address ) 2578 // 2579 // case #9. Mixed unsafe access 2580 // {instance} 2581 // | 2582 // CheckCastPP (raw) 2583 // top | 2584 // \ | 2585 // AddP ( base == top ) 2586 // 2587 Node *base = addp->in(AddPNode::Base); 2588 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2589 base = addp->in(AddPNode::Address); 2590 while (base->is_AddP()) { 2591 // Case #6 (unsafe access) may have several chained AddP nodes. 
2592       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
2593       base = base->in(AddPNode::Address);
2594     }
2595     if (base->Opcode() == Op_CheckCastPP &&
2596         base->bottom_type()->isa_rawptr() &&
2597         _igvn->type(base->in(1))->isa_oopptr()) {
2598       base = base->in(1); // Case #9
2599     } else {
2600       Node* uncast_base = base->uncast();
2601       int opcode = uncast_base->Opcode();
2602       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
2603              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
2604              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
2605              is_captured_store_address(addp), "sanity");
2606     }
2607   }
2608   return base;
2609 }
2610
2611 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
2612   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
2613   Node* addp2 = addp->raw_out(0);
2614   if (addp->outcnt() == 1 && addp2->is_AddP() &&
2615       addp2->in(AddPNode::Base) == n &&
2616       addp2->in(AddPNode::Address) == addp) {
2617     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
2618     //
2619     // Find the array's offset to push it on the worklist first and,
2620     // as a result, process the array's element offset first (pushed second)
2621     // to avoid a CastPP for the array's offset.
2622     // Otherwise the inserted CastPP (LocalVar) will point to what
2623     // the AddP (Field) points to, which would be wrong since
2624     // the algorithm expects the CastPP to point to the same node
2625     // as AddP's base CheckCastPP (LocalVar).
2626     //
2627     //   ArrayAllocation
2628     //     |
2629     //   CheckCastPP
2630     //     |
2631     //   memProj (from ArrayAllocation CheckCastPP)
2632     //     |  ||
2633     //     |  ||   Int (element index)
2634     //     |  ||    |   ConI (log(element size))
2635     //     |  ||    |   /
2636     //     |  ||   LShift
2637     //     |  ||  /
2638     //     |  AddP (array's element offset)
2639     //     |  |
2640     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
2641     //     | / /
2642     //     AddP (array's offset)
2643     //      |
2644     //     Load/Store (memory operation on array's element)
2645     //
2646     return addp2;
2647   }
2648   return NULL;
2649 }
2650
2651 //
2652 // Adjust the type and inputs of an AddP which computes the
2653 // address of a field of an instance
2654 //
2655 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
2656   PhaseGVN* igvn = _igvn;
2657   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
2658   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
2659   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
2660   if (t == NULL) {
2661     // We are computing a raw address for a store captured by an Initialize;
2662     // compute an appropriate address type (cases #3 and #5).
2663     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
2664     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
2665     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
2666     assert(offs != Type::OffsetBot, "offset must be a constant");
2667     t = base_t->add_offset(offs)->is_oopptr();
2668   }
2669   int inst_id = base_t->instance_id();
2670   assert(!t->is_known_instance() || t->instance_id() == inst_id,
2671          "old type must be non-instance or match new type");
2672
2673   // The type 't' could be a subclass of 'base_t'.
2674   // As a result t->offset() could be larger than base_t's size and it will
2675   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
2676   // constructor verifies correctness of the offset.
2677   //
2678   // It could happen on a subclass's branch (from type profiling
2679   // inlining) which was not eliminated during parsing since the exactness
2680   // of the allocation type was not propagated to the subclass type check.
2681   //
2682   // Or the type 't' could be not related to 'base_t' at all.
2683   // It could happen when the CHA type is different from the MDO type on a dead path
2684   // (for example, from an instanceof check) which is not collapsed during parsing.
2685   //
2686   // Do nothing for such AddP node and don't process its users since
2687   // this code branch will go away.
2688   //
2689   if (!t->is_known_instance() &&
2690       !base_t->maybe_java_subtype_of(t)) {
2691     return false; // bail out
2692   }
2693   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
2694   // Do NOT remove the next line: ensure a new alias index is allocated
2695   // for the instance type. Note: C++ will not remove it since the call
2696   // has a side effect.
2697   int alias_idx = _compile->get_alias_index(tinst);
2698   igvn->set_type(addp, tinst);
2699   // record the allocation in the node map
2700   set_map(addp, get_map(base->_idx));
2701   // Set addp's Base and Address to 'base'.
2702   Node *abase = addp->in(AddPNode::Base);
2703   Node *adr = addp->in(AddPNode::Address);
2704   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
2705       adr->in(0)->_idx == (uint)inst_id) {
2706     // Skip AddP cases #3 and #5.
2707   } else {
2708     assert(!abase->is_top(), "sanity"); // AddP case #3
2709     if (abase != base) {
2710       igvn->hash_delete(addp);
2711       addp->set_req(AddPNode::Base, base);
2712       if (abase == adr) {
2713         addp->set_req(AddPNode::Address, base);
2714       } else {
2715         // AddP case #4 (adr is array's element offset AddP node)
2716 #ifdef ASSERT
2717         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
2718         assert(adr->is_AddP() && atype != NULL &&
2719                atype->instance_id() == inst_id, "array's element offset should be processed first");
2720 #endif
2721       }
2722       igvn->hash_insert(addp);
2723     }
2724   }
2725   // Put on IGVN worklist since at least addp's type was changed above.
2726   record_for_optimizer(addp);
2727   return true;
2728 }
2729
2730 //
2731 // Create a new version of orig_phi if necessary. Returns either the newly
2732 // created phi or an existing phi. Sets new_created to indicate whether a new
2733 // phi was created. Cache the last newly created phi in the node map.
2734 //
2735 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
2736   Compile *C = _compile;
2737   PhaseGVN* igvn = _igvn;
2738   new_created = false;
2739   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
2740   // nothing to do if orig_phi is bottom memory or matches alias_idx
2741   if (phi_alias_idx == alias_idx) {
2742     return orig_phi;
2743   }
2744   // Have we recently created a Phi for this alias index?
2745   PhiNode *result = get_map_phi(orig_phi->_idx);
2746   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
2747     return result;
2748   }
2749   // Previous check may fail when the same wide memory Phi was split into Phis
2750   // for different memory slices. Search all Phis for this region.
2751   if (result != NULL) {
2752     Node* region = orig_phi->in(0);
2753     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2754       Node* phi = region->fast_out(i);
2755       if (phi->is_Phi() &&
2756           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
2757         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
2758         return phi->as_Phi();
2759       }
2760     }
2761   }
2762   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
2763     if (C->do_escape_analysis() == true && !C->failing()) {
2764       // Retry compilation without escape analysis.
2765       // If this is the first failure, the sentinel string will "stick"
2766       // to the Compile object, and the C2Compiler will see it and retry.
2767       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
2768     }
2769     return NULL;
2770   }
2771   orig_phi_worklist.append_if_missing(orig_phi);
2772   const TypePtr *atype = C->get_adr_type(alias_idx);
2773   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
2774   C->copy_node_notes_to(result, orig_phi);
2775   igvn->set_type(result, result->bottom_type());
2776   record_for_optimizer(result);
2777   set_map(orig_phi, result);
2778   new_created = true;
2779   return result;
2780 }
2781
2782 //
2783 // Return a new version of Memory Phi "orig_phi" with the inputs having the
2784 // specified alias index.
2785 //
2786 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
2787   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
2788   Compile *C = _compile;
2789   PhaseGVN* igvn = _igvn;
2790   bool new_phi_created;
2791   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
2792   if (!new_phi_created) {
2793     return result;
2794   }
2795   GrowableArray<PhiNode *> phi_list;
2796   GrowableArray<uint> cur_input;
2797   PhiNode *phi = orig_phi;
2798   uint idx = 1;
2799   bool finished = false;
2800   while(!finished) {
2801     while (idx < phi->req()) {
2802       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
2803       if (mem != NULL && mem->is_Phi()) {
2804         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
2805         if (new_phi_created) {
2806           // found a phi for which we created a new split; push the current one
2807           // on the worklist and begin processing the new one
2808           phi_list.push(phi);
2809           cur_input.push(idx);
2810           phi = mem->as_Phi();
2811           result = newphi;
2812           idx = 1;
2813           continue;
2814         } else {
2815           mem = newphi;
2816         }
2817       }
2818       if (C->failing()) {
2819         return NULL;
2820       }
2821       result->set_req(idx++, mem);
2822     }
2823 #ifdef ASSERT
2824     // verify that the new Phi has an input for each input of the original
2825     assert( phi->req() == result->req(), "must have same number of inputs.");
2826     assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
2827 #endif
2828     // Check if all new phi's inputs have the specified alias index.
2829     // Otherwise use the old phi.
2830 for (uint i = 1; i < phi->req(); i++) { 2831 Node* in = result->in(i); 2832 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2833 } 2834 // we have finished processing a Phi, see if there are any more to do 2835 finished = (phi_list.length() == 0 ); 2836 if (!finished) { 2837 phi = phi_list.pop(); 2838 idx = cur_input.pop(); 2839 PhiNode *prev_result = get_map_phi(phi->_idx); 2840 prev_result->set_req(idx++, result); 2841 result = prev_result; 2842 } 2843 } 2844 return result; 2845 } 2846 2847 // 2848 // The next methods are derived from methods in MemNode. 2849 // 2850 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2851 Node *mem = mmem; 2852 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2853 // means an array I have not precisely typed yet. Do not do any 2854 // alias stuff with it any time soon. 2855 if (toop->base() != Type::AnyPtr && 2856 !(toop->isa_instptr() && 2857 toop->is_instptr()->instance_klass()->is_java_lang_Object() && 2858 toop->offset() == Type::OffsetBot)) { 2859 mem = mmem->memory_at(alias_idx); 2860 // Update input if it is progress over what we have now 2861 } 2862 return mem; 2863 } 2864 2865 // 2866 // Move memory users to their memory slices. 2867 // 2868 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2869 Compile* C = _compile; 2870 PhaseGVN* igvn = _igvn; 2871 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2872 assert(tp != NULL, "ptr type"); 2873 int alias_idx = C->get_alias_index(tp); 2874 int general_idx = C->get_general_index(alias_idx); 2875 2876 // Move users first 2877 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2878 Node* use = n->fast_out(i); 2879 if (use->is_MergeMem()) { 2880 MergeMemNode* mmem = use->as_MergeMem(); 2881 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2882 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2883 continue; // Nothing to do 2884 } 2885 // Replace previous general reference to mem node. 2886 uint orig_uniq = C->unique(); 2887 Node* m = find_inst_mem(n, general_idx, orig_phis); 2888 assert(orig_uniq == C->unique(), "no new nodes"); 2889 mmem->set_memory_at(general_idx, m); 2890 --imax; 2891 --i; 2892 } else if (use->is_MemBar()) { 2893 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2894 if (use->req() > MemBarNode::Precedent && 2895 use->in(MemBarNode::Precedent) == n) { 2896 // Don't move related membars. 2897 record_for_optimizer(use); 2898 continue; 2899 } 2900 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2901 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2902 alias_idx == general_idx) { 2903 continue; // Nothing to do 2904 } 2905 // Move to general memory slice. 2906 uint orig_uniq = C->unique(); 2907 Node* m = find_inst_mem(n, general_idx, orig_phis); 2908 assert(orig_uniq == C->unique(), "no new nodes"); 2909 igvn->hash_delete(use); 2910 imax -= use->replace_edge(n, m, igvn); 2911 igvn->hash_insert(use); 2912 record_for_optimizer(use); 2913 --i; 2914 #ifdef ASSERT 2915 } else if (use->is_Mem()) { 2916 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2917 // Don't move related cardmark. 2918 continue; 2919 } 2920 // Memory nodes should have new memory input. 
2921       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
2922       assert(tp != NULL, "ptr type");
2923       int idx = C->get_alias_index(tp);
2924       assert(get_map(use->_idx) != NULL || idx == alias_idx,
2925              "Following memory nodes should have new memory input or be on the same memory slice");
2926     } else if (use->is_Phi()) {
2927       // Phi nodes should be split and moved already.
2928       tp = use->as_Phi()->adr_type()->isa_ptr();
2929       assert(tp != NULL, "ptr type");
2930       int idx = C->get_alias_index(tp);
2931       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
2932     } else {
2933       use->dump();
2934       assert(false, "should not be here");
2935 #endif
2936     }
2937   }
2938 }
2939
2940 //
2941 // Search the memory chain of "orig_mem" to find a MemNode whose address
2942 // has the specified alias index.
2943 //
2944 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
2945   if (orig_mem == NULL) {
2946     return orig_mem;
2947   }
2948   Compile* C = _compile;
2949   PhaseGVN* igvn = _igvn;
2950   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
2951   bool is_instance = (toop != NULL) && toop->is_known_instance();
2952   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
2953   Node *prev = NULL;
2954   Node *result = orig_mem;
2955   while (prev != result) {
2956     prev = result;
2957     if (result == start_mem) {
2958       break; // hit one of our sentinels
2959     }
2960     if (result->is_Mem()) {
2961       const Type *at = igvn->type(result->in(MemNode::Address));
2962       if (at == Type::TOP) {
2963         break; // Dead
2964       }
2965       assert (at->isa_ptr() != NULL, "pointer type required.");
2966       int idx = C->get_alias_index(at->is_ptr());
2967       if (idx == alias_idx) {
2968         break; // Found
2969       }
2970       if (!is_instance && (at->isa_oopptr() == NULL ||
2971                            !at->is_oopptr()->is_known_instance())) {
2972         break; // Do not skip store to general memory slice.
2973       }
2974       result = result->in(MemNode::Memory);
2975     }
2976     if (!is_instance) {
2977       continue; // don't search further for non-instance types
2978     }
2979     // skip over a call which does not affect this memory slice
2980     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
2981       Node *proj_in = result->in(0);
2982       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
2983         break; // hit one of our sentinels
2984       } else if (proj_in->is_Call()) {
2985         // ArrayCopy node processed here as well
2986         CallNode *call = proj_in->as_Call();
2987         if (!call->may_modify(toop, igvn)) {
2988           result = call->in(TypeFunc::Memory);
2989         }
2990       } else if (proj_in->is_Initialize()) {
2991         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
2992         // Stop if this is the initialization for the object instance
2993         // which contains this memory slice, otherwise skip over it.
2994 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2995 result = proj_in->in(TypeFunc::Memory); 2996 } 2997 } else if (proj_in->is_MemBar()) { 2998 // Check if there is an array copy for a clone 2999 // Step over GC barrier when ReduceInitialCardMarks is disabled 3000 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 3001 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 3002 3003 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 3004 // Stop if it is a clone 3005 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 3006 if (ac->may_modify(toop, igvn)) { 3007 break; 3008 } 3009 } 3010 result = proj_in->in(TypeFunc::Memory); 3011 } 3012 } else if (result->is_MergeMem()) { 3013 MergeMemNode *mmem = result->as_MergeMem(); 3014 result = step_through_mergemem(mmem, alias_idx, toop); 3015 if (result == mmem->base_memory()) { 3016 // Didn't find instance memory, search through general slice recursively. 3017 result = mmem->memory_at(C->get_general_index(alias_idx)); 3018 result = find_inst_mem(result, alias_idx, orig_phis); 3019 if (C->failing()) { 3020 return NULL; 3021 } 3022 mmem->set_memory_at(alias_idx, result); 3023 } 3024 } else if (result->is_Phi() && 3025 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 3026 Node *un = result->as_Phi()->unique_input(igvn); 3027 if (un != NULL) { 3028 orig_phis.append_if_missing(result->as_Phi()); 3029 result = un; 3030 } else { 3031 break; 3032 } 3033 } else if (result->is_ClearArray()) { 3034 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 3035 // Can not bypass initialization of the instance 3036 // we are looking for. 3037 break; 3038 } 3039 // Otherwise skip it (the call updated 'result' value). 3040 } else if (result->Opcode() == Op_SCMemProj) { 3041 Node* mem = result->in(0); 3042 Node* adr = NULL; 3043 if (mem->is_LoadStore()) { 3044 adr = mem->in(MemNode::Address); 3045 } else { 3046 assert(mem->Opcode() == Op_EncodeISOArray || 3047 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 3048 adr = mem->in(3); // Memory edge corresponds to destination array 3049 } 3050 const Type *at = igvn->type(adr); 3051 if (at != Type::TOP) { 3052 assert(at->isa_ptr() != NULL, "pointer type required."); 3053 int idx = C->get_alias_index(at->is_ptr()); 3054 if (idx == alias_idx) { 3055 // Assert in debug mode 3056 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 3057 break; // In product mode return SCMemProj node 3058 } 3059 } 3060 result = mem->in(MemNode::Memory); 3061 } else if (result->Opcode() == Op_StrInflatedCopy) { 3062 Node* adr = result->in(3); // Memory edge corresponds to destination array 3063 const Type *at = igvn->type(adr); 3064 if (at != Type::TOP) { 3065 assert(at->isa_ptr() != NULL, "pointer type required."); 3066 int idx = C->get_alias_index(at->is_ptr()); 3067 if (idx == alias_idx) { 3068 // Assert in debug mode 3069 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 3070 break; // In product mode return SCMemProj node 3071 } 3072 } 3073 result = result->in(MemNode::Memory); 3074 } 3075 } 3076 if (result->is_Phi()) { 3077 PhiNode *mphi = result->as_Phi(); 3078 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 3079 const TypePtr *t = mphi->adr_type(); 3080 if (!is_instance) { 3081 // Push all non-instance Phis on the orig_phis worklist to update inputs 3082 // during Phase 4 if needed. 
3083       orig_phis.append_if_missing(mphi);
3084     } else if (C->get_alias_index(t) != alias_idx) {
3085       // Create a new Phi with the specified alias index type.
3086       result = split_memory_phi(mphi, alias_idx, orig_phis);
3087     }
3088   }
3089   // the result is either MemNode, PhiNode, InitializeNode.
3090   return result;
3091 }
3092
3093 //
3094 // Convert the types of non-escaped objects to instance types where possible,
3095 // propagate the new type information through the graph, and update memory
3096 // edges and MergeMem inputs to reflect the new type.
3097 //
3098 // We start with allocations (and calls which may be allocations) on alloc_worklist.
3099 // The processing is done in 4 phases:
3100 //
3101 // Phase 1:  Process possible allocations from alloc_worklist. Create instance
3102 //           types for the CheckCastPP for allocations where possible.
3103 //           Propagate the new types through users as follows:
3104 //              casts and Phi:  push users on alloc_worklist
3105 //              AddP:  cast Base and Address inputs to the instance type
3106 //                     push any AddP users on alloc_worklist and push any memnode
3107 //                     users onto memnode_worklist.
3108 // Phase 2:  Process MemNodes from memnode_worklist. Compute a new address type and
3109 //           search the Memory chain for a store with the appropriate address
3110 //           type. If a Phi is found, create a new version with
3111 //           the appropriate memory slices from each of the Phi inputs.
3112 //           For stores, process the users as follows:
3113 //              MemNode:  push on memnode_worklist
3114 //              MergeMem: push on mergemem_worklist
3115 // Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice
3116 //           moving the first node encountered of each instance type to
3117 //           the input corresponding to its alias index (the appropriate
3118 //           memory slice).
3119 // Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
3120 //
3121 // In the following example, the CheckCastPP nodes are the cast of allocation
3122 // results and the allocation of node 29 is non-escaped and eligible to be an
3123 // instance type.
3124 //
3125 // We start with:
3126 //
3127 //     7 Parm #memory
3128 //    10 ConI "12"
3129 //    19 CheckCastPP "Foo"
3130 //    20 AddP _ 19 19 10 Foo+12 alias_index=4
3131 //    29 CheckCastPP "Foo"
3132 //    30 AddP _ 29 29 10 Foo+12 alias_index=4
3133 //
3134 //    40 StoreP 25 7 20 ... alias_index=4
3135 //    50 StoreP 35 40 30 ... alias_index=4
3136 //    60 StoreP 45 50 20 ... alias_index=4
3137 //    70 LoadP _ 60 30 ... alias_index=4
3138 //    80 Phi 75 50 60 Memory alias_index=4
3139 //    90 LoadP _ 80 30 ... alias_index=4
3140 //   100 LoadP _ 80 20 ... alias_index=4
3141 //
3142 //
3143 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
3144 // and creating a new alias index for node 30. This gives:
3145 //
3146 //     7 Parm #memory
3147 //    10 ConI "12"
3148 //    19 CheckCastPP "Foo"
3149 //    20 AddP _ 19 19 10 Foo+12 alias_index=4
3150 //    29 CheckCastPP "Foo" iid=24
3151 //    30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24
3152 //
3153 //    40 StoreP 25 7 20 ... alias_index=4
3154 //    50 StoreP 35 40 30 ... alias_index=6
3155 //    60 StoreP 45 50 20 ... alias_index=4
3156 //    70 LoadP _ 60 30 ... alias_index=6
3157 //    80 Phi 75 50 60 Memory alias_index=4
3158 //    90 LoadP _ 80 30 ... alias_index=6
3159 //   100 LoadP _ 80 20 ... alias_index=4
3160 //
3161 // In phase 2, new memory inputs are computed for the loads and stores,
3162 // and a new version of the phi is created. In phase 4, the inputs to
// In phase 4, the inputs to node 80 are updated and then the memory nodes are
// updated with the values computed in phase 2. This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10   Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10   Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
                                         GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
                                         GrowableArray<MergeMemNode*> &mergemem_worklist) {
  GrowableArray<Node *>  memnode_worklist;
  GrowableArray<PhiNode *>  orig_phis;
  PhaseIterGVN  *igvn = _igvn;
  uint new_index_start = (uint) _compile->num_alias_types();
  VectorSet visited;
  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  uint unique_old = _compile->unique();

  //  Phase 1:  Process possible allocations from alloc_worklist.
  //  Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = ptn->escape_state();
      // We have an allocation or call which returns a Java object,
      // see if it is non-escaped.
      if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
        continue;
      }
      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        // We could reach here for the allocate case if one init is associated with many allocs.
        if (alloc->is_Allocate()) {
          alloc->as_Allocate()->_is_scalar_replaceable = false;
        }
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect the j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
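      // Illustrative sketch (not actual IR) of the shape being detected here,
      // assuming a clone of some class Foo:
      //
      //   Allocate
      //     CheckCastPP "java/lang/Object"   <- n at this point
      //       CheckCastPP "Foo"              <- cast2, the cast switched to below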
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation): the object can't be restored during
          // deoptimization without a precise type.
          continue;
        }
      }

      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL) {
        continue;  // not a TypeOopPtr
      }
      if (!t->klass_is_exact()) {
        continue; // not a unique type
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
      // In order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc, n);
      set_map(n, alloc);
      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      // Allocate an alias index for the header fields. Accesses to
      // the header emitted during macro expansion wouldn't have
      // correct memory state otherwise.
      _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
      _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
      if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph;
        // this is more accurate than pushing immediate users from the Ideal Graph.
        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
          PointsToNode* tgt = e.get();
          if (tgt->is_Arraycopy()) {
            continue;
          }
          Node* use = tgt->ideal_node();
          assert(tgt->is_Field() && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
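        // For example (illustrative, assuming the constructor is inlined):
        // the field writes of "new Point(1, 2)" may be captured by the
        // allocation's Initialize node as raw stores, which address the
        // object through AddP nodes hanging off the raw result rather than
        // off the CheckCastPP.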
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t)),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
                     op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_SubTypeCheck ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes; if one of the inputs has a unique type,
  // record it in the ArrayCopy node so we know what memory this node
  // uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  //  Phase 2:  Process MemNode's from memnode_worklist. Compute new address
  //            types and new values for their Memory inputs (the Memory inputs
  //            are not actually updated until phase 4).
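  // (In the worked example above, this is where the new memory inputs for
  //  nodes 40..70 are computed; the alias_index=6 phi (node 120) is created
  //  when find_inst_mem() reaches phi 80 and split_memory_phi() clones it
  //  for the new slice.)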
  if (memnode_worklist.length() == 0) {
    return;  // nothing to do
  }
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL) {
        continue;
      }
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP) {
        continue;
      }
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
          continue;
        }
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
missing memory path"); 3582 } 3583 #endif 3584 } 3585 } 3586 } 3587 3588 // Phase 3: Process MergeMem nodes from mergemem_worklist. 3589 // Walk each memory slice moving the first node encountered of each 3590 // instance type to the input corresponding to its alias index. 3591 uint length = mergemem_worklist.length(); 3592 for( uint next = 0; next < length; ++next ) { 3593 MergeMemNode* nmm = mergemem_worklist.at(next); 3594 assert(!visited.test_set(nmm->_idx), "should not be visited before"); 3595 // Note: we don't want to use MergeMemStream here because we only want to 3596 // scan inputs which exist at the start, not ones we add during processing. 3597 // Note 2: MergeMem may already contains instance memory slices added 3598 // during find_inst_mem() call when memory nodes were processed above. 3599 igvn->hash_delete(nmm); 3600 uint nslices = MIN2(nmm->req(), new_index_start); 3601 for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { 3602 Node* mem = nmm->in(i); 3603 Node* cur = NULL; 3604 if (mem == NULL || mem->is_top()) { 3605 continue; 3606 } 3607 // First, update mergemem by moving memory nodes to corresponding slices 3608 // if their type became more precise since this mergemem was created. 3609 while (mem->is_Mem()) { 3610 const Type *at = igvn->type(mem->in(MemNode::Address)); 3611 if (at != Type::TOP) { 3612 assert (at->isa_ptr() != NULL, "pointer type required."); 3613 uint idx = (uint)_compile->get_alias_index(at->is_ptr()); 3614 if (idx == i) { 3615 if (cur == NULL) { 3616 cur = mem; 3617 } 3618 } else { 3619 if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { 3620 nmm->set_memory_at(idx, mem); 3621 } 3622 } 3623 } 3624 mem = mem->in(MemNode::Memory); 3625 } 3626 nmm->set_memory_at(i, (cur != NULL) ? cur : mem); 3627 // Find any instance of the current type if we haven't encountered 3628 // already a memory slice of the instance along the memory chain. 3629 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3630 if((uint)_compile->get_general_index(ni) == i) { 3631 Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); 3632 if (nmm->is_empty_memory(m)) { 3633 Node* result = find_inst_mem(mem, ni, orig_phis); 3634 if (_compile->failing()) { 3635 return; 3636 } 3637 nmm->set_memory_at(ni, result); 3638 } 3639 } 3640 } 3641 } 3642 // Find the rest of instances values 3643 for (uint ni = new_index_start; ni < new_index_end; ni++) { 3644 const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); 3645 Node* result = step_through_mergemem(nmm, ni, tinst); 3646 if (result == nmm->base_memory()) { 3647 // Didn't find instance memory, search through general slice recursively. 3648 result = nmm->memory_at(_compile->get_general_index(ni)); 3649 result = find_inst_mem(result, ni, orig_phis); 3650 if (_compile->failing()) { 3651 return; 3652 } 3653 nmm->set_memory_at(ni, result); 3654 } 3655 } 3656 igvn->hash_insert(nmm); 3657 record_for_optimizer(nmm); 3658 } 3659 3660 // Phase 4: Update the inputs of non-instance memory Phis and 3661 // the Memory input of memnodes 3662 // First update the inputs of any non-instance Phi's from 3663 // which we split out an instance Phi. Note we don't have 3664 // to recursively process Phi's encountered on the input memory 3665 // chains as is done in split_memory_phi() since they will 3666 // also be processed here. 
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
int ConnectionGraph::_no_escape_counter = 0;
int ConnectionGraph::_arg_escape_counter = 0;
int ConnectionGraph::_global_escape_counter = 0;

static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump_header(bool print_state, outputStream* out) const {
  NodeType nt = node_type();
  out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
      out->print("NSR ");
    }
  }
}

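// For reference (illustrative): for a non-scalar-replaceable JavaObject with
// pidx 5 the header above prints "JavaObject(5) NoEscape(NoEscape) NSR ".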
"P" : "")); 3777 } 3778 out->print(" )"); 3779 } 3780 out->print("["); 3781 for (EdgeIterator i(this); i.has_next(); i.next()) { 3782 PointsToNode* e = i.get(); 3783 out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : ""); 3784 } 3785 out->print(" ["); 3786 for (UseIterator i(this); i.has_next(); i.next()) { 3787 PointsToNode* u = i.get(); 3788 bool is_base = false; 3789 if (PointsToNode::is_base_use(u)) { 3790 is_base = true; 3791 u = PointsToNode::get_use_node(u)->as_Field(); 3792 } 3793 out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3794 } 3795 out->print(" ]] "); 3796 if (_node == NULL) { 3797 out->print("<null>%s", newline ? "\n" : ""); 3798 } else { 3799 _node->dump(newline ? "\n" : "", false, out); 3800 } 3801 } 3802 3803 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3804 bool first = true; 3805 int ptnodes_length = ptnodes_worklist.length(); 3806 for (int i = 0; i < ptnodes_length; i++) { 3807 PointsToNode *ptn = ptnodes_worklist.at(i); 3808 if (ptn == NULL || !ptn->is_JavaObject()) { 3809 continue; 3810 } 3811 PointsToNode::EscapeState es = ptn->escape_state(); 3812 if ((es != PointsToNode::NoEscape) && !Verbose) { 3813 continue; 3814 } 3815 Node* n = ptn->ideal_node(); 3816 if (n->is_Allocate() || (n->is_CallStaticJava() && 3817 n->as_CallStaticJava()->is_boxing_method())) { 3818 if (first) { 3819 tty->cr(); 3820 tty->print("======== Connection graph for "); 3821 _compile->method()->print_short_name(); 3822 tty->cr(); 3823 tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d", 3824 _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length()); 3825 tty->cr(); 3826 first = false; 3827 } 3828 ptn->dump(); 3829 // Print all locals and fields which reference this allocation 3830 for (UseIterator j(ptn); j.has_next(); j.next()) { 3831 PointsToNode* use = j.get(); 3832 if (use->is_LocalVar()) { 3833 use->dump(Verbose); 3834 } else if (Verbose) { 3835 use->dump(); 3836 } 3837 } 3838 tty->cr(); 3839 } 3840 } 3841 } 3842 3843 void ConnectionGraph::print_statistics() { 3844 tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); 3845 } 3846 3847 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) { 3848 if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation 3849 return; 3850 } 3851 for (int next = 0; next < java_objects_worklist.length(); ++next) { 3852 JavaObjectNode* ptn = java_objects_worklist.at(next); 3853 if (ptn->ideal_node()->is_Allocate()) { 3854 if (ptn->escape_state() == PointsToNode::NoEscape) { 3855 Atomic::inc(&ConnectionGraph::_no_escape_counter); 3856 } else if (ptn->escape_state() == PointsToNode::ArgEscape) { 3857 Atomic::inc(&ConnectionGraph::_arg_escape_counter); 3858 } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { 3859 Atomic::inc(&ConnectionGraph::_global_escape_counter); 3860 } else { 3861 assert(false, "Unexpected Escape State"); 3862 } 3863 } 3864 } 3865 } 3866 3867 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const { 3868 if (_compile->directive()->TraceEscapeAnalysisOption) { 3869 assert(ptn != nullptr, "should not be null"); 3870 
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}

const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif

void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}