/*
 * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
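  // For illustration (examples, not from the analyzed code): Java constructs
  // that produce such macro nodes include
  //   Point p = new Point(x, y);        // Allocate
  //   synchronized (localObj) { ... }   // Lock/Unlock on a non-Parm/Con object
  //   Integer b = Integer.valueOf(n);   // boxing CallStaticJava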
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != NULL) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non-escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // Load/Store at markWord address is at offset 0 so has no AddP which confuses EA
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, NULL, NULL);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
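    // (Simple edges whose inputs may not be processed yet are deferred to
    //  delayed_worklist and added later by add_final_edges().)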
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocation and Java static call results are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used to process Call nodes,
  // calls into CI to resolve symbols (types, fields, methods) referenced
  // in bytecode. During symbol resolution the VM may throw an exception
  // which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of non-escaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
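  //    (This folds CmpP/CmpN nodes whose result EA can determine and allows
  //     MemBarStoreStore nodes associated with non-escaping allocations to
  //     be eliminated; see the worklists passed to optimize_ideal_graph().)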
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // Annotate safepoints if they have <= ArgEscape objects in their scope and
  // annotate java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Returns true if there is an object in the scope of sfn that does not escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
        DeoptimizeObjectsALot) {
      // JVMTI agents can access locals. Must provide info about local objects at runtime.
      int num_locs = jvms->loc_size();
      for (int idx = 0; idx < num_locs; idx++) {
        Node* l = sfn->local(jvms, idx);
        if (not_global_escape(l)) {
          return true;
        }
      }
    }
    if (C->env()->jvmti_can_get_owned_monitor_info() ||
        C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // JVMTI agents can read monitors. Must provide info about locked objects at runtime.
      int num_mon = jvms->nof_monitors();
      for (int idx = 0; idx < num_mon; idx++) {
        Node* m = sfn->monitor_obj(jvms, idx);
        if (m != NULL && not_global_escape(m)) {
          return true;
        }
      }
    }
  }
  return false;
}

// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  if (call->method() != NULL) {
    uint max_idx = TypeFunc::Parms + call->method()->arg_size();
    for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
      Node* p = call->in(idx);
      if (not_global_escape(p)) {
        return true;
      }
    }
  } else {
    const char* name = call->as_CallStaticJava()->_name;
    assert(name != NULL, "no name");
    // no arg escapes through uncommon traps
    if (strcmp(name, "uncommon_trap") != 0) {
      // process_call_arguments() assumes that all arguments escape globally
      const TypeTuple* d = call->tf()->domain_sig();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          return true;
        }
      }
    }
  }
  return false;
}



// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
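      // (Either a regular oop return or an inline type returned as multiple
      //  fields; both cases are handled below.)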
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
        bool returns_oop = false;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
          ProjNode* pn = n->fast_out(i)->as_Proj();
          if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
            returns_oop = true;
          }
        }
        if (returns_oop) {
          add_call_node(n->as_Call());
        }
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some inputs
        // may not be defined yet.
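        // (e.g. a loop Phi whose back-edge input has not been visited yet)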
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
          (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
        assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
               n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet; defer to the final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_InlineType:
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue; // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue; // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue; // ignore top or inputs which go back to this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
             n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass: {
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;

          if (in->is_AddP()) {
            in = get_addp_base(in);
          }

          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == NULL) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore copying the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
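    // (typically a store through a raw address, e.g. from Unsafe)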
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == NULL) {
    n->dump(1);
    assert(adr_type != NULL, "dead node should not be on list");
    return true;
  }
#endif

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else { // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ?
          ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if bytecode analysis is not possible;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if a non-escaping object allocated
    //      during the call is returned;
    //    - mapped to an ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments' escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0 ||
             strncmp(name, "_load_unknown_inline", 20) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns a boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain_cc();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple* d = call->tf()->domain_sig();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue;
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)) ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->elem() != NULL &&
                                aat->isa_aryptr()->is_flat() &&
                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain_cc();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might.
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall through here if this is not a Java method or there is no
      // analyzer information, or for some other type of call: assume the
      // worst case, all arguments globally escape.
      const TypeTuple* d = call->tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations where something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define GRAPH_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer build_time;
  build_time.start();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will
          // take to complete graph construction.
          time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
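        // (Escape states only move up the NoEscape -> ArgEscape -> GlobalEscape
        //  lattice, so repeated propagation converges.)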
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  build_time.stop();
  _build_time = build_time.seconds();
  _build_iterations = iterations;

  // Bail out if we passed the limits.
  if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
           _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }

#undef GRAPH_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_allocs_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}

// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_allocs_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values_phantom(ptn);
    }
  }
  return (non_escaped_allocs_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy()) {
        continue;
      }
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) { // NULL object does not have field edges
        continue;
      }
      // Add edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj)) {
      continue; // No new edge added, there was such edge already.
    }
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) { // NULL object does not have field edges
              continue;
            }
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to a value stored in a field.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was the source object of an arraycopy and go over the
    // arraycopy's destination objects since values stored to a field of the
    // source object are accessible by uses (loads) of fields of the
    // destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
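// Two Field nodes are "related" when they may refer to the same memory:
// they share a base and have either equal offsets or an unknown (OffsetBot)
// offset on one side, e.g. a store to a known field versus an array access
// at a non-constant index (see the offset checks below).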
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since a store to the destination object's field
        // does not update the value in the source object's field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
      if (base->ideal_node()->is_Allocate()) {
        return 0;
      }
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  PointsToNode* init_val = phantom_obj;
  Node* alloc = pta->ideal_node();

  // Do nothing for Allocate nodes since their field values are
  // "known" unless they are initialized by arraycopy/clone.
  if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
    if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
      // Non-flattened inline type arrays are initialized with
      // the default value instead of null. Handle them here.
      init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
      assert(init_val != NULL, "default value should be registered");
    } else {
      return 0;
    }
  }
  // A non-escaped allocation returned from a Java or runtime call has unknown values in fields.
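  // (For example, the result of a _multianewarray runtime call: the inner
  // array references it installs are not visible to this analysis, so each
  // oop field must be assumed to point to phantom_obj; see the name check
  // in the ASSERT block below.)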
  assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
#ifdef ASSERT
  if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == NULL) {
    const char* name = alloc->as_CallStaticJava()->_name;
    assert(strncmp(name, "_multianewarray", 15) == 0 ||
           strncmp(name, "_load_unknown_inline", 20) == 0, "sanity");
  }
#endif
  int new_edges = 0;
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get();
    if (field->is_Field() && field->as_Field()->is_oop()) {
      if (add_edge(field, init_val)) {
        // New edge was added
        new_edges++;
        add_field_uses_to_worklist(field->as_Field());
      }
    }
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  Node* alloc = pta->ideal_node();
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != NULL) {
    return 0;
  }
  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;
  int new_edges = 0;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL value if the field's value is not recorded.
  // The Connection Graph does not record a default initialization by NULL
  // captured by an Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop()) {
      continue; // Not oop field
    }
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element;
        // always add a reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores so skip it
        // since it should be recorded already.
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which has not been eliminated yet.
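          // (With compressed oops the captured oop store is a T_NARROWOOP
          // store, otherwise a raw T_ADDRESS store, hence the memory_type()
          // check against 'ft' below.)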
          if (store != NULL && store->is_Store() &&
              store->as_Store()->memory_type() == ft) {
            value = store->in(MemNode::ValueIn);
#ifdef ASSERT
            if (VerifyConnectionGraph) {
              // Verify that AddP already points to all objects the value points to.
              PointsToNode* val = ptnode_adr(value->_idx);
              assert((val != NULL), "should be processed already");
              PointsToNode* missed_obj = NULL;
              if (val->is_JavaObject()) {
                if (!field->points_to(val->as_JavaObject())) {
                  missed_obj = val;
                }
              } else {
                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
                  tty->print_cr("----------init store has invalid value -----");
                  store->dump();
                  val->dump();
                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
                }
                for (EdgeIterator j(val); j.has_next(); j.next()) {
                  PointsToNode* obj = j.get();
                  if (obj->is_JavaObject()) {
                    if (!field->points_to(obj->as_JavaObject())) {
                      missed_obj = obj;
                      break;
                    }
                  }
                }
              }
              if (missed_obj != NULL) {
                tty->print_cr("----------field---------------------------------");
                field->dump();
                tty->print_cr("----------missed reference to object------------");
                missed_obj->dump();
                tty->print_cr("----------object referenced by init store-------");
                store->dump();
                val->dump();
                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
              }
            }
#endif
          } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not collected
            // by the Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add the initial value NULL so
            // that the pointer-compare optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(field, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(field->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}

// Adjust the scalar_replaceable state after the Connection Graph is built.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    if (use->is_Arraycopy()) {
      continue;
    }
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable(), "sanity");
      // 1. An object is not scalar replaceable if the field into which it is
      // stored has unknown offset (stored into unknown element of an array).
      if (field->offset() == Type::OffsetBot) {
        set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
        return;
      }
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // 2. An object is not scalar replaceable if the field into which it is
        // stored has multiple bases one of which is null.
        if ((base == null_obj) && (field->base_count() > 1)) {
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
          return;
        }
        // 2.5. An object is not scalar replaceable if the field into which it is
        // stored has an NSR base.
        if (!base->scalar_replaceable()) {
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
          return;
        }
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 3. An object is not scalar replaceable if it is merged with other objects.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
        set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    if (j.get()->is_Arraycopy()) {
      continue;
    }

    // A non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 4. An object is not scalar replaceable if it has a field with unknown
    // offset (array's element is accessed in loop).
    if (offset == Type::OffsetBot) {
      set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
      return;
    }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    // accesses its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();

    // Test for an unsafe access that was parsed as maybe off heap
    // (with a CheckCastPP to raw memory).
    assert(n->is_AddP(), "expect an address computation");
    if (n->in(AddPNode::Base)->is_top() &&
        n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
      assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
      assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
      set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
      return;
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
        set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
        return;
      }
    }

    // 6. Or the address may point to more than one object. This may produce
    // a false positive result (set not scalar replaceable)
    // since the flow-insensitive escape analysis can't separate
    // the case when stores overwrite the field's value from the case
    // when stores happened on different control branches.
    //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in cases like the next one:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object which should also be
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
          set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
        }
      }
    }
  }
}

// Propagate the NSR (Not Scalar Replaceable) state.
void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist) {
  int jobj_length = jobj_worklist.length();
  bool found_nsr_alloc = true;
  while (found_nsr_alloc) {
    found_nsr_alloc = false;
    for (int next = 0; next < jobj_length; ++next) {
      JavaObjectNode* jobj = jobj_worklist.at(next);
      for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
        PointsToNode* use = i.get();
        if (use->is_Field()) {
          FieldNode* field = use->as_Field();
          assert(field->is_oop() && field->scalar_replaceable(), "sanity");
          assert(field->offset() != Type::OffsetBot, "sanity");
          for (BaseIterator i(field); i.has_next(); i.next()) {
            PointsToNode* base = i.get();
            // An object is not scalar replaceable if the field into which
            // it is stored has an NSR base.
            if ((base != null_obj) && !base->scalar_replaceable()) {
              set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
              found_nsr_alloc = true;
              break;
            }
          }
        }
      }
    }
  }
}

#ifdef ASSERT
void ConnectionGraph::verify_connection_graph(
                 GrowableArray<PointsToNode*>&   ptnodes_worklist,
                 GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
                 GrowableArray<Node*>& addp_worklist) {
  // Verify that the graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_allocs_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that the escape state is final.
  int length = non_escaped_allocs_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
  assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify field information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that the field has all bases.
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
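      // (After find_init_values_null()/find_init_values_phantom() every oop
      // field of a non-escaped allocation should have at least one edge,
      // even if only to null_obj or phantom_obj; the dumps below fire when
      // that invariant is broken.)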
      if (field->edge_count() == 0) {
        tty->print_cr("----------field does not have references----------");
        field->dump();
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          tty->print_cr("----------field has next base---------------------");
          base->dump();
          if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
            tty->print_cr("----------base has fields-------------------------");
            for (EdgeIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
            tty->print_cr("----------base has references---------------------");
            for (UseIterator j(base); j.has_next(); j.next()) {
              j.get()->dump();
            }
          }
        }
        for (UseIterator i(field); i.has_next(); i.next()) {
          i.get()->dump();
        }
        assert(field->edge_count() > 0, "sanity");
      }
    }
  }
}
#endif

// Optimize ideal graph.
void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
                                           GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
  Compile* C = _compile;
  PhaseIterGVN* igvn = _igvn;
  if (EliminateLocks) {
    // Mark locks before changing ideal graph.
    int cnt = C->macro_count();
    for (int i = 0; i < cnt; i++) {
      Node *n = C->macro_node(i);
      if (n->is_AbstractLock()) { // Lock and Unlock nodes
        AbstractLockNode* alock = n->as_AbstractLock();
        if (!alock->is_non_esc_obj()) {
          const Type* obj_type = igvn->type(alock->obj_node());
          if (not_global_escape(alock->obj_node()) && !obj_type->is_inlinetypeptr()) {
            assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could already be marked eliminated by the lock
            // coarsening code during the first IGVN before EA. Replace the
            // coarsened flag to eliminate all associated locks/unlocks.
#ifdef ASSERT
            alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
#endif
            alock->set_non_esc_obj();
          }
        }
      }
    }
  }

  if (OptimizePtrCompare) {
    for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
      Node *n = ptr_cmp_worklist.at(i);
      const TypeInt* tcmp = optimize_ptr_compare(n);
      if (tcmp->singleton()) {
        Node* cmp = igvn->makecon(tcmp);
#ifndef PRODUCT
        if (PrintOptimizePtrCompare) {
          tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
          if (Verbose) {
            n->dump(1);
          }
        }
#endif
        igvn->replace_node(n, cmp);
      }
    }
  }

  // For MemBarStoreStore nodes added in library_call.cpp, check the
  // escape status of the associated AllocateNode and optimize out the
  // MemBarStoreStore node if the allocated object never escapes.
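  // (The barrier keeps the object's initializing stores from floating past
  // its publication to other threads. If the object never escapes there is
  // no publication to order against, so a plain MemBarCPUOrder, or no
  // barrier at all for inline type buffers, suffices; see below.)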
  for (int i = 0; i < storestore_worklist.length(); i++) {
    Node* storestore = storestore_worklist.at(i);
    Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
    if (alloc->is_Allocate() && not_global_escape(alloc)) {
      if (alloc->in(AllocateNode::InlineType) != NULL) {
        // Non-escaping inline type buffer allocations don't require a membar.
        storestore->as_MemBar()->remove(_igvn);
      } else {
        MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
        mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory));
        mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
        igvn->register_new_node_with_optimizer(mb);
        igvn->replace_node(storestore, mb);
      }
    }
  }
}

// Optimize object comparisons.
const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) {
  assert(OptimizePtrCompare, "sanity");
  assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
  const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
  const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
  const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]

  PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx);
  PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx);
  JavaObjectNode* jobj1 = unique_java_object(n->in(1));
  JavaObjectNode* jobj2 = unique_java_object(n->in(2));
  assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
  assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");

  // Check simple cases first.
  if (jobj1 != NULL) {
    if (jobj1->escape_state() == PointsToNode::NoEscape) {
      if (jobj1 == jobj2) {
        // Comparing the same non-escaping object.
        return EQ;
      }
      Node* obj = jobj1->ideal_node();
      // Comparing a non-escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn2->points_to(jobj1)) {
        return NE; // This includes the nullness check.
      }
    }
  }
  if (jobj2 != NULL) {
    if (jobj2->escape_state() == PointsToNode::NoEscape) {
      Node* obj = jobj2->ideal_node();
      // Comparing a non-escaping allocation.
      if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
          !ptn1->points_to(jobj2)) {
        return NE; // This includes the nullness check.
      }
    }
  }
  if (jobj1 != NULL && jobj1 != phantom_obj &&
      jobj2 != NULL && jobj2 != phantom_obj &&
      jobj1->ideal_node()->is_Con() &&
      jobj2->ideal_node()->is_Con()) {
    // Klass or String constant comparison. Need to be careful with
    // compressed pointers - compare types of ConN and ConP instead of nodes.
    const Type* t1 = jobj1->ideal_node()->get_ptr_type();
    const Type* t2 = jobj2->ideal_node()->get_ptr_type();
    if (t1->make_ptr() == t2->make_ptr()) {
      return EQ;
    } else {
      return NE;
    }
  }
  if (ptn1->meet(ptn2)) {
    return UNKNOWN; // Sets are not disjoint.
  }

  // Sets are disjoint.
  bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
  bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
  bool set1_has_null_ptr    = ptn1->points_to(null_obj);
  bool set2_has_null_ptr    = ptn2->points_to(null_obj);
  if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
      (set2_has_unknown_ptr && set1_has_null_ptr)) {
    // Check nullness of the unknown object.
    return UNKNOWN;
  }

  // Disjointness by itself is not sufficient since
  // alias analysis is not complete for escaped objects.
  // Disjoint sets are definitely unrelated only when
  // at least one set contains only non-escaping allocations.
  if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
    if (ptn1->non_escaping_allocation()) {
      return NE;
    }
  }
  if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
    if (ptn2->non_escaping_allocation()) {
      return NE;
    }
  }
  return UNKNOWN;
}

// Connection Graph construction functions.

void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
  map_ideal_node(n, ptadr);
}

void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
  map_ideal_node(n, ptadr);
}

void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  bool unsafe = false;
  bool is_oop = is_oop_field(n, offset, &unsafe);
  if (unsafe) {
    es = PointsToNode::GlobalEscape;
  }
  Compile* C = _compile;
  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
  map_ideal_node(n, field);
}

void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
                                    PointsToNode* src, PointsToNode* dst) {
  assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
  assert((src != null_obj) && (dst != null_obj), "not for ConP NULL");
  PointsToNode* ptadr = _nodes.at(n->_idx);
  if (ptadr != NULL) {
    assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
    return;
  }
  Compile* C = _compile;
  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
  map_ideal_node(n, ptadr);
  // Add edge from arraycopy node to source object.
  (void)add_edge(ptadr, src);
  src->set_arraycopy_src();
  // Add edge from destination object to arraycopy node.
  (void)add_edge(dst, ptadr);
  dst->set_arraycopy_dst();
}

bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
  const Type* adr_type = n->as_AddP()->bottom_type();
  int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
  BasicType bt = T_INT;
  if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
    // Check only oop fields.
    if (!adr_type->isa_aryptr() ||
        adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
        adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) {
      // OffsetBot is used to reference an array's element. Ignore the first AddP.
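      // (Array addressing usually uses two chained AddPs: the first computes
      // the scaled element offset and the second adds the array header
      // offset. Only the second AddP of such a pair names an element; see
      // find_second_addp().)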
      if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) {
        bt = T_OBJECT;
      }
    }
  } else if (offset != oopDesc::klass_offset_in_bytes()) {
    if (adr_type->isa_instptr()) {
      ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
      if (field != NULL) {
        bt = field->layout_type();
      } else {
        // Check for unsafe oop field access.
        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
            n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
            n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
            BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
          bt = T_OBJECT;
          (*unsafe) = true;
        }
      }
    } else if (adr_type->isa_aryptr()) {
      if (offset == arrayOopDesc::length_offset_in_bytes()) {
        // Ignore array length load.
      } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) {
        // Ignore first AddP.
      } else {
        const Type* elemtype = adr_type->is_aryptr()->elem();
        if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
          ciInlineKlass* vk = elemtype->inline_klass();
          field_offset += vk->first_field_offset();
          bt = vk->get_field_by_offset(field_offset, false)->layout_type();
        } else {
          bt = elemtype->array_element_basic_type();
        }
      }
    } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
      // Allocation initialization, ThreadLocal field access, unsafe access.
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
          n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
          n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
          BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
        bt = T_OBJECT;
      }
    }
  }
  // Note: T_NARROWOOP is not classed as a real reference type.
  return (is_reference_type(bt) || bt == T_NARROWOOP);
}

// Returns the unique java object pointed to, or NULL.
JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) {
  assert(!_collecting, "should not call when constructed graph");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return NULL;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == NULL) {
    return NULL;
  }
  if (ptn->is_JavaObject()) {
    return ptn->as_JavaObject();
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  JavaObjectNode* jobj = NULL;
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      if (jobj == NULL) {
        jobj = e->as_JavaObject();
      } else if (jobj != e) {
        return NULL;
      }
    }
  }
  return jobj;
}

// Return true if this node points only to non-escaping allocations.
bool PointsToNode::non_escaping_allocation() {
  if (is_JavaObject()) {
    Node* n = ideal_node();
    if (n->is_Allocate() || n->is_CallStaticJava()) {
      return (escape_state() == PointsToNode::NoEscape);
    } else {
      return false;
    }
  }
  assert(is_LocalVar(), "sanity");
  // Check all java objects it points to.
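  // (One escaping or non-allocation target disqualifies the whole points-to
  // set: the answer is conservative over all edges of the LocalVar.)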
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    if (e->is_JavaObject()) {
      Node* n = e->ideal_node();
      if ((e->escape_state() != PointsToNode::NoEscape) ||
          !(n->is_Allocate() || n->is_CallStaticJava())) {
        return false;
      }
    }
  }
  return true;
}

// Return true if we know the node does not escape globally.
bool ConnectionGraph::not_global_escape(Node *n) {
  assert(!_collecting, "should not call during graph construction");
  // If the node was created after the escape computation we can't answer.
  uint idx = n->_idx;
  if (idx >= nodes_size()) {
    return false;
  }
  PointsToNode* ptn = ptnode_adr(idx);
  if (ptn == NULL) {
    return false; // not in congraph (e.g. ConI)
  }
  PointsToNode::EscapeState es = ptn->escape_state();
  // If we have already computed a value, return it.
  if (es >= PointsToNode::GlobalEscape) {
    return false;
  }
  if (ptn->is_JavaObject()) {
    return true; // (es < PointsToNode::GlobalEscape);
  }
  assert(ptn->is_LocalVar(), "sanity");
  // Check all java objects it points to.
  for (EdgeIterator i(ptn); i.has_next(); i.next()) {
    if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
      return false;
    }
  }
  return true;
}


// Helper functions

// Return true if this node points to the specified node or to nodes it points to.
bool PointsToNode::points_to(JavaObjectNode* ptn) const {
  if (is_JavaObject()) {
    return (this == ptn);
  }
  assert(is_LocalVar() || is_Field(), "sanity");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    if (i.get() == ptn) {
      return true;
    }
  }
  return false;
}

// Return true if the points-to sets of the two nodes intersect.
bool PointsToNode::meet(PointsToNode* ptn) {
  if (this == ptn) {
    return true;
  } else if (ptn->is_JavaObject()) {
    return this->points_to(ptn->as_JavaObject());
  } else if (this->is_JavaObject()) {
    return ptn->points_to(this->as_JavaObject());
  }
  assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
  int ptn_count = ptn->edge_count();
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* this_e = i.get();
    for (int j = 0; j < ptn_count; j++) {
      if (this_e == ptn->edge(j)) {
        return true;
      }
    }
  }
  return false;
}

#ifdef ASSERT
// Return true if bases point to this java object.
bool FieldNode::has_base(JavaObjectNode* jobj) const {
  for (BaseIterator i(this); i.has_next(); i.next()) {
    if (i.get() == jobj) {
      return true;
    }
  }
  return false;
}
#endif

bool ConnectionGraph::is_captured_store_address(Node* addp) {
  // Handle simple case first.
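  // A raw store address counts as "captured" when it is based directly on
  // the allocation's raw result projection, or (the Phi case below) when a
  // store through it has an Initialize node among its uses.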
  assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access");
  if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
    return true;
  } else if (addp->in(AddPNode::Address)->is_Phi()) {
    for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
      Node* addp_use = addp->fast_out(i);
      if (addp_use->is_Store()) {
        for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
          if (addp_use->fast_out(j)->is_Initialize()) {
            return true;
          }
        }
      }
    }
  }
  return false;
}

int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) {
  const Type *adr_type = phase->type(adr);
  if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
    int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot ||
           adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
           "offset must be a constant or it is initialization of array");
    return offs;
  }
  return adr_type->is_ptr()->flattened_offset();
}

Node* ConnectionGraph::get_addp_base(Node *addp) {
  assert(addp->is_AddP(), "must be AddP");
  //
  // AddP cases for Base and Address inputs:
  // case #1. Direct object's field reference:
  //     Allocate
  //       |
  //     Proj #5 ( oop result )
  //       |
  //     CheckCastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #2. Indirect object's field reference:
  //      Phi
  //       |
  //     CastPP (cast to instance type)
  //      | |
  //     AddP  ( base == address )
  //
  // case #3. Raw object's field reference for Initialize node:
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #4. Array's element reference:
  //   {CheckCastPP | CastPP}
  //     |  | |
  //     |  AddP ( array's element offset )
  //     |  |
  //     AddP ( array's offset )
  //
  // case #5. Raw object's field reference for arraycopy stub call:
  //          The inline_native_clone() case when the arraycopy stub is called
  //          after the allocation before Initialize and CheckCastPP nodes.
  //      Allocate
  //        |
  //      Proj #5 ( oop result )
  //       | |
  //       AddP  ( base == address )
  //
  // case #6. Constant Pool, ThreadLocal, CastX2P or
  //          Raw object's field reference:
  //      {ConP, ThreadLocal, CastX2P, raw Load}
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  // case #7. Klass's field reference.
  //      LoadKlass
  //       | |
  //       AddP  ( base == address )
  //
  // case #8. narrow Klass's field reference.
  //      LoadNKlass
  //       |
  //      DecodeN
  //       | |
  //       AddP  ( base == address )
  //
  // case #9. Mixed unsafe access
  //    {instance}
  //        |
  //      CheckCastPP (raw)
  //  top   |
  //     \  |
  //     AddP  ( base == top )
  //
  Node *base = addp->in(AddPNode::Base);
  if (base->uncast()->is_top()) { // The AddP cases #3, #6 and #9.
    base = addp->in(AddPNode::Address);
    while (base->is_AddP()) {
      // Case #6 (unsafe access) may have several chained AddP nodes.
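      // (Each AddP in the chain contributes one component of the raw offset;
      // the assert below checks that every link still has a top Base, as
      // expected for such an access.)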
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    if (base->Opcode() == Op_CheckCastPP &&
        base->bottom_type()->isa_rawptr() &&
        _igvn->type(base->in(1))->isa_oopptr()) {
      base = base->in(1); // Case #9
    } else {
      Node* uncast_base = base->uncast();
      int opcode = uncast_base->Opcode();
      assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
             opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
             (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
             is_captured_store_address(addp), "sanity");
    }
  }
  return base;
}

Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {
    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first and,
    // as a result, process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to. Which would be wrong since
    // the algorithm expects the CastPP to have the same points-to set
    // as the AddP's base CheckCastPP (LocalVar).
    //
    //    ArrayAllocation
    //     |
    //    CheckCastPP
    //     |
    //    memProj (from ArrayAllocation CheckCastPP)
    //     |  ||
    //     |  ||   Int (element index)
    //     |  ||    |   ConI (log(element size))
    //     |  ||    |   /
    //     |  ||   LShift
    //     |  ||  /
    //     |  AddP (array's element offset)
    //     |  |
    //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //     | / /
    //     AddP (array's offset)
    //      |
    //     Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance.
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type (cases #3 and #5).
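    // (At this point the AddP still has top as its Base input and its
    // Address input is the allocation's raw result projection, as the
    // asserts below check.)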
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    if (base_t->isa_aryptr() != NULL) {
      // In the case of a flattened inline type array, each field has its
      // own slice so we need to extract the field being accessed from
      // the address computation.
      t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
    } else {
      t = base_t->add_offset(offs)->is_oopptr();
    }
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size and it will
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // It could happen on a subclass's branch (from the type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' might not be related to 'base_t' at all.
  // It could happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->maybe_java_subtype_of(t)) {
    return false; // bail out
  }
  const TypePtr* tinst = base_t->add_offset(t->offset());
  if (tinst->isa_aryptr() && t->isa_aryptr()) {
    // In the case of a flattened inline type array, each field has its
    // own slice so we need to keep track of the field being accessed.
    tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
    // Keep array properties (not flat/null-free).
    tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
    if (tinst == NULL) {
      return false; // Skip dead path with inconsistent properties
    }
  }

  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has a side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // Record the allocation in the node map.
  set_map(addp, get_map(base->_idx));
  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
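    // (The raw address is already based on this allocation's result
    // projection, so no Base/Address rewrite is needed here.)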
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi. Sets new_created to indicate whether a new
// phi was created. Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // Nothing to do if orig_phi is bottom memory or matches alias_idx.
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // The previous check may fail when the same wide memory Phi was split into
  // Phis for different memory slices. Search all Phis for this region.
  if (result != NULL) {
    Node* region = orig_phi->in(0);
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node* phi = region->fast_out(i);
      if (phi->is_Phi() &&
          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
        return phi->as_Phi();
      }
    }
  }
  if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
    if (C->do_escape_analysis() == true && !C->failing()) {
      // Retry compilation without escape analysis.
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
    }
    return NULL;
  }
  orig_phi_worklist.append_if_missing(orig_phi);
  const TypePtr *atype = C->get_adr_type(alias_idx);
  result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
  C->copy_node_notes_to(result, orig_phi);
  igvn->set_type(result, result->bottom_type());
  record_for_optimizer(result);
  set_map(orig_phi, result);
  new_created = true;
  return result;
}

//
// Return a new version of Memory Phi "orig_phi" with the inputs having the
// specified alias index.
//
PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) {
  assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  bool new_phi_created;
  PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
  if (!new_phi_created) {
    return result;
  }
  GrowableArray<PhiNode *> phi_list;
  GrowableArray<uint> cur_input;
  PhiNode *phi = orig_phi;
  uint idx = 1;
  bool finished = false;
  while (!finished) {
    while (idx < phi->req()) {
      Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist);
      if (mem != NULL && mem->is_Phi()) {
        PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
        if (new_phi_created) {
          // Found a phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
          phi_list.push(phi);
          cur_input.push(idx);
          phi = mem->as_Phi();
          result = newphi;
          idx = 1;
          continue;
        } else {
          mem = newphi;
        }
      }
      if (C->failing()) {
        return NULL;
      }
      result->set_req(idx++, mem);
    }
#ifdef ASSERT
    // Verify that the new Phi has an input for each input of the original.
    assert(phi->req() == result->req(), "must have same number of inputs.");
    assert(result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match");
#endif
    // Check if all of the new phi's inputs have the specified alias index.
    // Otherwise use the old phi.
    for (uint i = 1; i < phi->req(); i++) {
      Node* in = result->in(i);
      assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond.");
    }
    // We have finished processing a Phi, see if there are any more to do.
    finished = (phi_list.length() == 0);
    if (!finished) {
      phi = phi_list.pop();
      idx = cur_input.pop();
      PhiNode *prev_result = get_map_phi(phi->_idx);
      prev_result->set_req(idx++, result);
      result = prev_result;
    }
  }
  return result;
}

//
// The next methods are derived from methods in MemNode.
//
Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
  Node *mem = mmem;
  // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
  // means an array I have not precisely typed yet. Do not do any
  // alias stuff with it any time soon.
  if (toop->base() != Type::AnyPtr &&
      !(toop->isa_instptr() &&
        toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
        toop->offset() == Type::OffsetBot)) {
    mem = mmem->memory_at(alias_idx);
    // Update input if it is progress over what we have now.
  }
  return mem;
}

//
// Move memory users to their memory slices.
//
void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
  assert(tp != NULL, "ptr type");
  int alias_idx = C->get_alias_index(tp);
  int general_idx = C->get_general_index(alias_idx);

  // Move users first.
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);
    if (use->is_MergeMem()) {
      MergeMemNode* mmem = use->as_MergeMem();
      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
        continue; // Nothing to do.
      }
      // Replace previous general reference to mem node.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      mmem->set_memory_at(general_idx, m);
      --imax;
      --i;
    } else if (use->is_MemBar()) {
      assert(!use->is_Initialize(), "initializing stores should not be moved");
      if (use->req() > MemBarNode::Precedent &&
          use->in(MemBarNode::Precedent) == n) {
        // Don't move related membars.
        record_for_optimizer(use);
        continue;
      }
      tp = use->as_MemBar()->adr_type()->isa_ptr();
      if ((tp != NULL && C->get_alias_index(tp) == alias_idx) ||
          alias_idx == general_idx) {
        continue; // Nothing to do.
      }
      // Move to general memory slice.
      uint orig_uniq = C->unique();
      Node* m = find_inst_mem(n, general_idx, orig_phis);
      assert(orig_uniq == C->unique(), "no new nodes");
      igvn->hash_delete(use);
      imax -= use->replace_edge(n, m, igvn);
      igvn->hash_insert(use);
      record_for_optimizer(use);
      --i;
#ifdef ASSERT
    } else if (use->is_Mem()) {
      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
        // Don't move related cardmarks.
        continue;
      }
      // Memory nodes should have a new memory input.
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search the memory chain of "mem" to find a memory node whose address
// type matches the specified alias index.
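// The walk stops at start memory, at the allocation or Initialize node of
// the instance in question, or at the first node found on the right slice.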
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
  if (orig_mem == NULL) {
    return orig_mem;
  }
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (toop != NULL) && toop->is_known_instance();
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem) {
      break; // hit one of our sentinels
    }
    if (result->is_Mem()) {
      const Type *at = igvn->type(result->in(MemNode::Address));
      if (at == Type::TOP) {
        break; // Dead
      }
      assert(at->isa_ptr() != NULL, "pointer type required.");
      int idx = C->get_alias_index(at->is_ptr());
      if (idx == alias_idx) {
        break; // Found
      }
      if (!is_instance && (at->isa_oopptr() == NULL ||
                           !at->is_oopptr()->is_known_instance())) {
        break; // Do not skip store to general memory slice.
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance) {
      continue; // don't search further for non-instance types
    }
    // Skip over a call which does not affect this memory slice.
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
        break; // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        // ArrayCopy node processed here as well.
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(toop, igvn)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
        if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) {
          result = proj_in->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_MemBar()) {
        // Check if there is an array copy for a clone.
        // Step over the GC barrier when ReduceInitialCardMarks is disabled.
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));

        if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
          // Stop if it is a clone.
          ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
          if (ac->may_modify(toop, igvn)) {
            break;
          }
        }
        result = proj_in->in(TypeFunc::Memory);
      }
    } else if (result->is_MergeMem()) {
      MergeMemNode *mmem = result->as_MergeMem();
      result = step_through_mergemem(mmem, alias_idx, toop);
      if (result == mmem->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
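        // (The general slice may itself contain nodes for this instance;
        // whatever is found there is cached back into the MergeMem below.)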
        result = mmem->memory_at(C->get_general_index(alias_idx));
        result = find_inst_mem(result, alias_idx, orig_phis);
        if (C->failing()) {
          return NULL;
        }
        mmem->set_memory_at(alias_idx, result);
      }
    } else if (result->is_Phi() &&
               C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
      Node *un = result->as_Phi()->unique_input(igvn);
      if (un != NULL) {
        orig_phis.append_if_missing(result->as_Phi());
        result = un;
      } else {
        break;
      }
    } else if (result->is_ClearArray()) {
      if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
        // Can not bypass initialization of the instance
        // we are looking for.
        break;
      }
      // Otherwise skip it (the call updated 'result' value).
    } else if (result->Opcode() == Op_SCMemProj) {
      Node* mem = result->in(0);
      Node* adr = NULL;
      if (mem->is_LoadStore()) {
        adr = mem->in(MemNode::Address);
      } else {
        assert(mem->Opcode() == Op_EncodeISOArray ||
               mem->Opcode() == Op_StrCompressedCopy, "sanity");
        adr = mem->in(3); // Memory edge corresponds to destination array
      }
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx) {
          // Assert in debug mode
          assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = mem->in(MemNode::Memory);
    } else if (result->Opcode() == Op_StrInflatedCopy) {
      Node* adr = result->in(3); // Memory edge corresponds to destination array
      const Type *at = igvn->type(adr);
      if (at != Type::TOP) {
        assert(at->isa_ptr() != NULL, "pointer type required.");
        int idx = C->get_alias_index(at->is_ptr());
        if (idx == alias_idx) {
          // Assert in debug mode
          assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
          break; // In product mode return SCMemProj node
        }
      }
      result = result->in(MemNode::Memory);
    }
  }
  if (result->is_Phi()) {
    PhiNode *mphi = result->as_Phi();
    assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
    const TypePtr *t = mphi->adr_type();
    if (!is_instance) {
      // Push all non-instance Phis on the orig_phis worklist to update inputs
      // during Phase 4 if needed.
      orig_phis.append_if_missing(mphi);
    } else if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis);
    }
  }
  // The result is either a MemNode, a PhiNode or an InitializeNode.
  return result;
}

//
// Convert the types of non-escaped objects to instance types where possible,
// propagate the new type information through the graph, and update memory
// edges and MergeMem inputs to reflect the new type.
//
// We start with allocations (and calls which may be allocations) on alloc_worklist.
// The processing is done in 4 phases:
//
// Phase 1:  Process possible allocations from alloc_worklist.  Create instance
//           types for the CheckCastPP for allocations where possible.
//           Propagate the new types through users as follows:
//              casts and Phi:  push users on alloc_worklist
//              AddP:  cast Base and Address inputs to the instance type
//                     push any AddP users on alloc_worklist and push any memnode
//                     users onto memnode_worklist.
// Phase 2:  Process MemNode's from memnode_worklist.  Compute the new address
//           type and search the Memory chain for a store with the appropriate
//           address type.  If a Phi is found, create a new version with
//           the appropriate memory slices from each of the Phi inputs.
//           For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
// Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//           slice moving the first node encountered of each instance type to
//           the input corresponding to its alias index.
// Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is non-escaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10            Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"
//    30  AddP  _ 29 29 10            Foo+12  alias_index=4
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=4
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=4
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=4
//   100  LoadP    _  80  20   ... alias_index=4
//
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30.  This gives:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10            Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10            Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35  40  30   ... alias_index=6
//    60  StoreP  45  50  20   ... alias_index=4
//    70  LoadP    _  60  30   ... alias_index=6
//    80  Phi     75  50  60   Memory alias_index=4
//    90  LoadP    _  80  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
// node 80 are updated and then the memory nodes are updated with the
// values computed in phase 2.  This results in:
//
//     7 Parm #memory
//    10  ConI  "12"
//    19  CheckCastPP   "Foo"
//    20  AddP  _ 19 19 10            Foo+12  alias_index=4
//    29  CheckCastPP   "Foo"  iid=24
//    30  AddP  _ 29 29 10            Foo+12  alias_index=6  iid=24
//
//    40  StoreP  25   7  20   ... alias_index=4
//    50  StoreP  35   7  30   ... alias_index=6
//    60  StoreP  45  40  20   ... alias_index=4
//    70  LoadP    _  50  30   ... alias_index=6
//    80  Phi     75  40  60   Memory alias_index=4
//   120  Phi     75  50  50   Memory alias_index=6
//    90  LoadP    _ 120  30   ... alias_index=6
//   100  LoadP    _  80  20   ... alias_index=4
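//
// As a Java-level illustration (a hedged sketch, not taken from any actual
// program; the names are made up), a graph of this shape could come from a
// method where one allocation escapes and the other does not:
//
//   Foo g = new Foo(); publish(g);    // node 19: its allocation escapes
//   Foo f = new Foo();                // node 29: non-escaping, gets iid=24
//   g.x = ...; f.x = ...; g.x = ...;  // stores 40, 50, 60
//   ... = f.x;                        // load 70, rerouted to the new slice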
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
                                         GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
                                         GrowableArray<MergeMemNode*> &mergemem_worklist) {
  GrowableArray<Node *> memnode_worklist;
  GrowableArray<PhiNode *> orig_phis;
  PhaseIterGVN *igvn = _igvn;
  uint new_index_start = (uint) _compile->num_alias_types();
  VectorSet visited;
  ideal_nodes.clear(); // Reset for use with set_map/get_map.
  uint unique_old = _compile->unique();

  // Phase 1:  Process possible allocations from alloc_worklist.
  // Create instance types for the CheckCastPP for allocations where possible.
  //
  // (Note: don't forget to change the order of the second AddP node on
  //  the alloc_worklist if the order of the worklist processing is changed,
  //  see the comment in find_second_addp().)
  //
  while (alloc_worklist.length() != 0) {
    Node *n = alloc_worklist.pop();
    uint ni = n->_idx;
    if (n->is_Call()) {
      CallNode *alloc = n->as_Call();
      // copy escape information to call node
      PointsToNode* ptn = ptnode_adr(alloc->_idx);
      PointsToNode::EscapeState es = ptn->escape_state();
      // We have an allocation or call which returns a Java object,
      // see if it is non-escaped.
      if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
        continue;
      }
      // Find CheckCastPP for the allocate or for the return value of a call
      n = alloc->result_cast();
      if (n == NULL) {            // No uses except Initialize node
        if (alloc->is_Allocate()) {
          // Set the scalar_replaceable flag for allocation
          // so it could be eliminated if it has no uses.
          alloc->as_Allocate()->_is_scalar_replaceable = true;
        }
        continue;
      }
      if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        // We could reach here for the Allocate case when one Initialize is
        // associated with several allocations.
        if (alloc->is_Allocate()) {
          alloc->as_Allocate()->_is_scalar_replaceable = false;
        }
        continue;
      }

      // The inline code for Object.clone() casts the allocation result to
      // java.lang.Object and then to the actual type of the allocated
      // object. Detect this case and use the second cast.
      // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
      // the allocation result is cast to java.lang.Object and then
      // to the actual Array type.
      if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
          && (alloc->is_AllocateArray() ||
              igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
        Node *cast2 = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->is_CheckCastPP()) {
            cast2 = use;
            break;
          }
        }
        if (cast2 != NULL) {
          n = cast2;
        } else {
          // Not scalar replaceable: the allocation type is unknown statically
          // (reflection allocation) and the object can't be restored during
          // deoptimization without the precise type.
          continue;
        }
      }

      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
      if (t == NULL) {
        continue;  // not a TypeOopPtr
      }
      if (!t->klass_is_exact()) {
        continue; // not a unique type
      }
      if (alloc->is_Allocate()) {
        // Set the scalar_replaceable flag for allocation
        // so it could be eliminated.
        alloc->as_Allocate()->_is_scalar_replaceable = true;
      }
      set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
      // In order for an object to be scalar-replaceable, it must be:
      //   - a direct allocation (not a call returning an object)
      //   - non-escaping
      //   - eligible to be a unique type
      //   - not determined to be ineligible by escape analysis
      set_map(alloc, n);
      set_map(n, alloc);
      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
      igvn->hash_delete(n);
      igvn->set_type(n, tinst);
      n->raise_bottom_type(tinst);
      igvn->hash_insert(n);
      record_for_optimizer(n);
      // Allocate an alias index for the header fields. Accesses to
      // the header emitted during macro expansion wouldn't have
      // correct memory state otherwise.
      _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
      _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
      if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {

        // First, put on the worklist all Field edges from the Connection Graph
        // which is more accurate than putting immediate users from the Ideal Graph.
        for (EdgeIterator e(ptn); e.has_next(); e.next()) {
          PointsToNode* tgt = e.get();
          if (tgt->is_Arraycopy()) {
            continue;
          }
          Node* use = tgt->ideal_node();
          assert(tgt->is_Field() && use->is_AddP(),
                 "only AddP nodes are Field edges in CG");
          if (use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          }
        }

        // An allocation may have an Initialize which has raw stores. Scan
        // the users of the raw allocation result and push AddP users
        // on alloc_worklist.
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert (raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        // Unexpected escaped or unknown base: retry the compilation
        // without (iterative) escape analysis.
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());   // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) {
          if (tn_t->isa_aryptr()) {
            // Keep array properties (not flat/null-free)
            tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
            if (tinst == NULL) {
              continue; // Skip dead path with inconsistent properties
            }
          }
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t)),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->Opcode() == Op_Return) {
        // Allocation is referenced by field of returned inline type
        assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
                     op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know which memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }

  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  // Phase 2:  Process MemNode's from memnode_worklist.  Compute the new address
  //           type and the new values for the Memory inputs (the Memory inputs
  //           are not actually updated until phase 4).
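  //           For instance, in the example before split_unique_types() above,
  //           this phase computes the new memory input 7 for StoreP 50 and 40
  //           for StoreP 60; the values are only recorded via set_map() here
  //           and applied to the nodes in phase 4.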
  if (memnode_worklist.length() == 0)
    return; // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL) {
        continue;
      }
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != NULL &&
               strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
      n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP) {
        continue;
      }
      assert (addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert ((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
          continue;
        }
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites destination array
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != NULL &&
                 strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
        // store_unknown_inline overwrites destination array
        memnode_worklist.append_if_missing(use);
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  // Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //           Walk each memory slice moving the first node encountered of each
  //           instance type to the input corresponding to its alias index.
  uint length = mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top()) {
        continue;
      }
      // First, update mergemem by moving memory nodes to corresponding slices
      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert (at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL) {
              cur = mem;
            }
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already encountered
      // a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4:  Update the inputs of non-instance memory Phis and
  //           the Memory input of memnodes.
  // First update the inputs of any non-instance Phi's from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phi's encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move memory users of a store first.
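        // (move_inst_mem() retargets MergeMem and MemBar users that still
        // refer to this store through the general memory slice, before the
        // store's own memory input is rewired below.)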
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
int ConnectionGraph::_no_escape_counter = 0;
int ConnectionGraph::_arg_escape_counter = 0;
int ConnectionGraph::_global_escape_counter = 0;

static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};

void PointsToNode::dump_header(bool print_state, outputStream* out) const {
  NodeType nt = node_type();
  out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
      out->print("NSR ");
    }
  }
}

void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
  dump_header(print_state, out);
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop()) {
      out->print("oop ");
    }
    if (f->offset() > 0) {
      out->print("+%d ", f->offset());
    }
    out->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      out->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    out->print(" )");
  }
  out->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    out->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  out->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  out->print(" ]] ");
  if (_node == NULL) {
    out->print("<null>%s", newline ? "\n" : "");
  } else {
    _node->dump(newline ? "\n" : "", false, out);
  }
}

void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode *ptn = ptnodes_worklist.at(i);
    if (ptn == NULL || !ptn->is_JavaObject()) {
      continue;
    }
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
                      _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}

void ConnectionGraph::print_statistics() {
  tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter));
}

void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
  if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
    return;
  }
  for (int next = 0; next < java_objects_worklist.length(); ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    if (ptn->ideal_node()->is_Allocate()) {
      if (ptn->escape_state() == PointsToNode::NoEscape) {
        Atomic::inc(&ConnectionGraph::_no_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
        Atomic::inc(&ConnectionGraph::_arg_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
        Atomic::inc(&ConnectionGraph::_global_escape_counter);
      } else {
        assert(false, "Unexpected Escape State");
      }
    }
  }
}

void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    assert(ptn != nullptr, "should not be null");
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}

const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif

// Push the node and its users on the IGVN worklist so the optimizer
// revisits them after the graph has been updated.
void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}