1 /* 2 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

// Expand all pending Shenandoah barrier pseudo-nodes (IU barriers and
// load-reference barriers) into explicit IR. This runs two dedicated
// loop-opts passes (expand + post-expand) followed by a round of
// post-loop-opts IGVN. Returns false if compilation failed on the way.
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->iu_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    // Barrier expansion happens after regular loop opts are done, so the
    // post-loop-opts flag is temporarily cleared to allow the two
    // Shenandoah-specific PhaseIdealLoop passes below.
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;

    C->set_major_progress();
    if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
      return false;
    }
    C->clear_major_progress();
    C->process_for_post_loop_opts_igvn(igvn);
    if (C->failing()) return false;

    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}

// Recognize the IR shape produced by test_gc_state():
//   If (Bool#ne (CmpI (AndI (LoadB/LoadUB <thread gc_state>) mask) 0))
// i.e. a test of the given mask bits against the thread-local gc-state
// byte. Returns false outside of Shenandoah or for any other shape.
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    // Exact Op_If only: rejects If subclasses (e.g. counted loop ends).
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

// A "heap stable" test is a gc-state test against the HAS_FORWARDED flag.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

// Is n a byte load of the per-thread Shenandoah gc-state field, i.e. a
// LoadB/LoadUB from (ThreadLocal + gc_state_offset)?
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

// Walk control backwards from start towards stop (stop must dominate
// start) and report whether any path crosses a safepoint. Leaf calls do
// not count as safepoints here.
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      // A merge point: follow every incoming control path.
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
// Debug-only verification helper: follow the def chain of oop value 'in'
// and check it is properly covered by barriers for the access kind 't'.
// Phis and CMoves are traversed breadth-wise via the 'phis' stack;
// 'visited' breaks cycles; encountered load-reference barriers are
// recorded in 'barriers_used'. Returns false if a value reaches the use
// without the required barrier.
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      // Null needs no barrier.
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      // Not an oop: nothing to verify.
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          // An oop store additionally requires an IU barrier somewhere on
          // the path we walked (recorded on the phis stack).
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahIUBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        // Remember the IU barrier on the stack so the oop-store check
        // above can see it, then keep following the value.
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        // Freshly allocated objects need no barrier.
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        // Unrecognized def: fail verification.
        return false;
      }
    }
    // Resume at the next unprocessed input of the topmost pending
    // phi/cmove; pop entries whose inputs are exhausted.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

// Dump the offending node(s) with some surrounding context, then abort
// the VM with the given message.
void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != nullptr) {
    n1->dump(+10);
  }
  if (n2 != nullptr) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

// Debug-only whole-graph verification: walk every node reachable from
// the root and check that each oop access (loads, stores, compares,
// atomics, intrinsic leaf calls, ...) is covered by the barriers
// Shenandoah requires. Fails fatally on the first violation.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  // Off by default: when enabled, also flags barriers that no verified
  // access actually uses.
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        // Metadata loads need no barrier.
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          // Mark word loads are exempt.
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          // Reference.referent has its own special handling.
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          // Raw-memory store: check whether this is a write into the
          // thread-local SATB mark queue buffer, which is exempt.
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          // Comparing against null/constant needs no barrier.
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          // Comparing against a freshly allocated object is also exempt.
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          // Still walk the inputs so any barriers on them are recorded
          // as used; discard the traversal state on failure.
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Table of known leaf runtime calls: for each call name, which
      // argument positions carry oops and the access kind required for
      // each. A pos of -1 terminates an entry's argument list.
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "array_partition_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone }, { -1, ShenandoahNone }, { -1, ShenandoahNone } },
        "arraysort_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { -1, ShenandoahNone }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain_sig();
        // The destination is the second pointer argument of the stub.
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        // Array fill stubs (*_fill) store through their first argument.
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          // Known call: check each listed argument position.
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          // Cross-check: every oop argument the call actually takes must
          // appear in the table entry.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown leaf call: it must not take any oop arguments.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Table of remaining node types that take oop inputs: which input
      // positions carry oops and the access kind each requires (-1
      // terminates an entry's list).
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      // For calls, only check declared arguments, not precedence edges.
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain_sig()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        // Cross-check: every oop input must be listed in the table.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Unlisted node type: it must not take any oop inputs.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        // Walk scalar-replaced object entries in the debug info so any
        // barriers feeding them are recorded as used.
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

// c and d have the same control c. Conservatively establish that d
// dominates n by proving there is no data path from d to n among nodes
// pinned at that control. Returns false as soon as n is reached.
bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

// Does d (with control d_c) dominate n (with control n_c)? Falls back
// to the data-path walk above when both share the same control.
bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

// Step one node up the memory graph for the given alias index.
// Asserts (and dies in debug) on memory node kinds it does not expect.
Node* next_mem(Node* mem, int alias) {
  Node* res = nullptr;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

// Walk the idom chain from c up to dom, checking the control shape.
// Returns: nullptr if there is no branching on the way; the single If
// projection when allow_one_proj is set and exactly one non-trap If
// projection is found; or NodeSentinel for any unsupported shape
// (multiple projections, jump/catch projections, NeverBranch, or a
// region with CFG exits that escape the walked set).
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect all control nodes between the region and its idom, then
      // make sure none of them has a CFG exit outside that set (other
      // than uncommon trap projections).
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj()) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern() != nullptr) {
          // Uncommon trap branch: ignore.
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            // More than one If projection on the path.
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

// Walk memory up from mem until a state whose control strictly
// dominates ctrl is found; its control is returned through mem_ctrl.
// Returns nullptr if the walk revisits a node (a cycle). A terminating
// MergeMem is narrowed to the requested alias slice.
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq;
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return nullptr;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

// Find the bottom-memory (TypePtr::BOTTOM, all-slices) state live at
// ctrl by walking up the dominator tree until a memory Phi or a
// bottom-memory projection is found.
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      // A region carries memory state in its memory Phis.
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections* projs = c->as_Call()->extract_projections(true, false);
        if (projs->fallthrough_memproj != nullptr) {
          if (projs->fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs->catchall_memproj == nullptr) {
              mem = projs->fallthrough_memproj;
            } else {
              // Pick the memory projection on the path that dominates the
              // barrier: fallthrough or catch-all.
              if (phase->is_dominator(projs->fallthrough_catchproj, ctrl)) {
                mem = projs->fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs->catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs->catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == nullptr, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}

// Collect the non-CFG uses of n that are pinned at ctrl, excluding the
// back-edge input of loop Phis (those are handled separately).
void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

// Turn a strip-mined loop into a plain loop nest: replace the outer
// strip-mined loop head and its loop-end If with regular LoopNode/IfNode
// equivalents and clear the inner loop's strip-mined flag.
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

// Emit a "(thread->gc_state & flags) != 0" test at ctrl. On return,
// ctrl is the taken (bits set, unlikely) projection and test_fail_ctrl
// the not-taken one. The emitted shape must satisfy is_gc_state_test().
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                 DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                 TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(gc_state_iff);
  test_fail_ctrl = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread, old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state, old_ctrl);
  phase->register_new_node(gc_state_and, old_ctrl);
  phase->register_new_node(gc_state_cmp, old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  // The offset constant is loop-invariant; pin it at the root.
  phase->set_ctrl(gc_state_offset, phase->C->root());

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

// Emit a null check for val at ctrl, but only when val's type admits
// null. On return, ctrl is the non-null (likely) projection and
// null_ctrl the null one; both are left untouched when the check is
// statically unnecessary.
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    // val may be null: the check is needed.
    Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl = new IfTrueNode(null_iff);
    null_ctrl = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff, loop, old_ctrl);
    phase->register_control(ctrl, loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp, old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

// Emit an in-collection-set test for val at ctrl: index the cset fast
// test table by (val >> region_size_shift) and branch on a non-zero
// byte. On return, ctrl is the in-cset (unlikely) projection and
// not_cset_ctrl the other one.
// (Definition continues past the end of this chunk.)
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val = new CastP2XNode(old_ctrl, val);
  Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

  Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(cset_iff);
  not_cset_ctrl = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  // The cset table base is a constant; pin it at the root.
  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val, old_ctrl);
  phase->register_new_node(cset_idx, old_ctrl);
phase->register_new_node(cset_addr, old_ctrl); 965 phase->register_new_node(cset_load_addr, old_ctrl); 966 phase->register_new_node(cset_load_ptr, old_ctrl); 967 phase->register_new_node(cset_load, old_ctrl); 968 phase->register_new_node(cset_cmp, old_ctrl); 969 phase->register_new_node(cset_bool, old_ctrl); 970 } 971 972 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, 973 DecoratorSet decorators, PhaseIdealLoop* phase) { 974 IdealLoopTree*loop = phase->get_loop(ctrl); 975 const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr(); 976 977 address calladdr = nullptr; 978 const char* name = nullptr; 979 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators); 980 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators); 981 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators); 982 bool is_native = ShenandoahBarrierSet::is_native_access(decorators); 983 bool is_narrow = UseCompressedOops && !is_native; 984 if (is_strong) { 985 if (is_narrow) { 986 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow); 987 name = "load_reference_barrier_strong_narrow"; 988 } else { 989 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong); 990 name = "load_reference_barrier_strong"; 991 } 992 } else if (is_weak) { 993 if (is_narrow) { 994 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow); 995 name = "load_reference_barrier_weak_narrow"; 996 } else { 997 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak); 998 name = "load_reference_barrier_weak"; 999 } 1000 } else { 1001 assert(is_phantom, "only remaining strength"); 1002 if (is_narrow) { 1003 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow); 1004 name = "load_reference_barrier_phantom_narrow"; 1005 } else { 1006 calladdr = CAST_FROM_FN_PTR(address, 
ShenandoahRuntime::load_reference_barrier_phantom); 1007 name = "load_reference_barrier_phantom"; 1008 } 1009 } 1010 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM); 1011 1012 call->init_req(TypeFunc::Control, ctrl); 1013 call->init_req(TypeFunc::I_O, phase->C->top()); 1014 call->init_req(TypeFunc::Memory, phase->C->top()); 1015 call->init_req(TypeFunc::FramePtr, phase->C->top()); 1016 call->init_req(TypeFunc::ReturnAdr, phase->C->top()); 1017 call->init_req(TypeFunc::Parms, val); 1018 call->init_req(TypeFunc::Parms+1, load_addr); 1019 phase->register_control(call, loop, ctrl); 1020 ctrl = new ProjNode(call, TypeFunc::Control); 1021 phase->register_control(ctrl, loop, call); 1022 val = new ProjNode(call, TypeFunc::Parms); 1023 phase->register_new_node(val, call); 1024 val = new CheckCastPPNode(ctrl, val, obj_type); 1025 phase->register_new_node(val, ctrl); 1026 } 1027 1028 void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) { 1029 Node* ctrl = phase->get_ctrl(barrier); 1030 Node* init_raw_mem = fixer.find_mem(ctrl, barrier); 1031 1032 // Update the control of all nodes that should be after the 1033 // barrier control flow 1034 uses.clear(); 1035 // Every node that is control dependent on the barrier's input 1036 // control will be after the expanded barrier. The raw memory (if 1037 // its memory is control dependent on the barrier's input control) 1038 // must stay above the barrier. 
1039 uses_to_ignore.clear(); 1040 if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) { 1041 uses_to_ignore.push(init_raw_mem); 1042 } 1043 for (uint next = 0; next < uses_to_ignore.size(); next++) { 1044 Node *n = uses_to_ignore.at(next); 1045 for (uint i = 0; i < n->req(); i++) { 1046 Node* in = n->in(i); 1047 if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) { 1048 uses_to_ignore.push(in); 1049 } 1050 } 1051 } 1052 for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) { 1053 Node* u = ctrl->fast_out(i); 1054 if (u->_idx < last && 1055 u != barrier && 1056 !uses_to_ignore.member(u) && 1057 (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) && 1058 (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) { 1059 Node* old_c = phase->ctrl_or_self(u); 1060 Node* c = old_c; 1061 if (c != ctrl || 1062 is_dominator_same_ctrl(old_c, barrier, u, phase) || 1063 ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) { 1064 phase->igvn().rehash_node_delayed(u); 1065 int nb = u->replace_edge(ctrl, region, &phase->igvn()); 1066 if (u->is_CFG()) { 1067 if (phase->idom(u) == ctrl) { 1068 phase->set_idom(u, region, phase->dom_depth(region)); 1069 } 1070 } else if (phase->get_ctrl(u) == ctrl) { 1071 assert(u != init_raw_mem, "should leave input raw mem above the barrier"); 1072 uses.push(u); 1073 } 1074 assert(nb == 1, "more than 1 ctrl input?"); 1075 --i, imax -= nb; 1076 } 1077 } 1078 } 1079 } 1080 1081 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections* projs, PhaseIdealLoop* phase) { 1082 Node* region = nullptr; 1083 while (c != ctrl) { 1084 if (c->is_Region()) { 1085 region = c; 1086 } 1087 c = phase->idom(c); 1088 } 1089 assert(region != nullptr, ""); 1090 Node* phi = new PhiNode(region, n->bottom_type()); 1091 for (uint j = 1; j < region->req(); j++) { 1092 Node* in = region->in(j); 1093 if 
(phase->is_dominator(projs->fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs->catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

// Expand all Shenandoah barrier pseudo-nodes recorded in the barrier-set C2
// state into explicit control flow:
//  - loop nests that would host a barrier are un-strip-mined first;
//  - LRBs whose control is a Java call are hoisted above a rethrow stub, or
//    cloned so the fallthrough and catch-all paths each get their own copy;
//  - each ShenandoahLoadReferenceBarrierNode becomes a gc-state test, an
//    optional in-cset test and a runtime stub call, merged by a Region/Phi;
//  - each ShenandoahIUBarrierNode (SATB pre-barrier) becomes a marking test,
//    a null test, a SATB-queue fast path and a runtime-call slow path.
void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();

  Unique_Node_List uses;
  for (int i = 0; i < state->iu_barriers_count(); i++) {
    Node* barrier = state->iu_barrier(i);
    Node* ctrl = phase->get_ctrl(barrier);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* head = loop->head();
    if (head->is_OuterStripMinedLoop()) {
      // Expanding a barrier here will break loop strip mining
      // verification. Transform the loop so the loop nest doesn't
      // appear as strip mined.
      OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
      hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
    }
  }

  Node_Stack stack(0);
  Node_List clones;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);

    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    CallStaticJavaNode* unc = nullptr;
    Node* unc_ctrl = nullptr;
    Node* uncasted_val = val;

    // If a dominating CastPP of the LRB's value guards an explicit
    // "val != null" uncommon-trap check, hoist the LRB above that check by
    // rewiring the If to compare the LRB result instead.
    for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
      Node* u = lrb->fast_out(i);
      if (u->Opcode() == Op_CastPP &&
          u->in(0) != nullptr &&
          phase->is_dominator(u->in(0), ctrl)) {
        const Type* u_t = phase->igvn().type(u);

        if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
            u->in(0)->Opcode() == Op_IfTrue &&
            u->in(0)->as_Proj()->is_uncommon_trap_if_pattern() &&
            u->in(0)->in(0)->is_If() &&
            u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
            u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
            u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
            u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
            u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
          IdealLoopTree* loop = phase->get_loop(ctrl);
          IdealLoopTree* unc_loop = phase->get_loop(u->in(0));

          if (!unc_loop->is_member(loop)) {
            continue;
          }

          Node* branch = no_branches(ctrl, u->in(0), false, phase);
          assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
          if (branch == NodeSentinel) {
            continue;
          }

          Node* iff = u->in(0)->in(0);
          Node* bol = iff->in(1)->clone();
          Node* cmp = bol->in(1)->clone();
          cmp->set_req(1, lrb);
          bol->set_req(1, cmp);
          phase->igvn().replace_input_of(iff, 1, bol);
          phase->set_ctrl(lrb, iff->in(0));
          phase->register_new_node(cmp, iff->in(0));
          phase->register_new_node(bol, iff->in(0));
          break;
        }
      }
    }
    if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
      CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        // The rethrow call may have too many projections to be
        // properly handled here. Given there's no reason for a
        // barrier to depend on the call, move it above the call
        stack.push(lrb, 0);
        do {
          Node* n = stack.node();
          uint idx = stack.index();
          if (idx < n->req()) {
            Node* in = n->in(idx);
            stack.set_index(idx+1);
            if (in != nullptr) {
              if (phase->has_ctrl(in)) {
                if (phase->is_dominator(call, phase->get_ctrl(in))) {
#ifdef ASSERT
                  for (uint i = 0; i < stack.size(); i++) {
                    assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
                  }
#endif
                  stack.push(in, 0);
                }
              } else {
                assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
              }
            }
          } else {
            phase->set_ctrl(n, call->in(0));
            stack.pop();
          }
        } while(stack.size() > 0);
        continue;
      }
      // Control is a Java call with exception paths: clone the barrier (and
      // every node between the call and its catch projections that depends on
      // it) so the fallthrough and catch-all paths each get a private copy.
      CallProjections* projs = call->extract_projections(false, false);
#ifdef ASSERT
      VectorSet cloned;
#endif
      Node* lrb_clone = lrb->clone();
      phase->register_new_node(lrb_clone, projs->catchall_catchproj);
      phase->set_ctrl(lrb, projs->fallthrough_catchproj);

      stack.push(lrb, 0);
      clones.push(lrb_clone);

      do {
        assert(stack.size() == clones.size(), "");
        Node* n = stack.node();
#ifdef ASSERT
        if (n->is_Load()) {
          Node* mem = n->in(MemNode::Memory);
          for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
            Node* u = mem->fast_out(j);
            assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
          }
        }
#endif
        uint idx = stack.index();
        Node* n_clone = clones.at(clones.size()-1);
        if (idx < n->outcnt()) {
          Node* u = n->raw_out(idx);
          Node* c = phase->ctrl_or_self(u);
          if (phase->is_dominator(call, c) && phase->is_dominator(c, projs->fallthrough_proj)) {
            // Use sits between the call and its projections: clone it too.
            stack.set_index(idx+1);
            assert(!u->is_CFG(), "");
            stack.push(u, 0);
            assert(!cloned.test_set(u->_idx), "only one clone");
            Node* u_clone = u->clone();
            int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
            assert(nb > 0, "should have replaced some uses");
            phase->register_new_node(u_clone, projs->catchall_catchproj);
            clones.push(u_clone);
            phase->set_ctrl(u, projs->fallthrough_catchproj);
          } else {
            bool replaced = false;
            if (u->is_Phi()) {
              for (uint k = 1; k < u->req(); k++) {
                if (u->in(k) == n) {
                  if (phase->is_dominator(projs->catchall_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, n_clone);
                    replaced = true;
                  } else if (!phase->is_dominator(projs->fallthrough_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
                    replaced = true;
                  }
                }
              }
            } else {
              if (phase->is_dominator(projs->catchall_catchproj, c)) {
                phase->igvn().rehash_node_delayed(u);
                int nb = u->replace_edge(n, n_clone, &phase->igvn());
                assert(nb > 0, "should have replaced some uses");
                replaced = true;
              } else if (!phase->is_dominator(projs->fallthrough_catchproj, c)) {
                if (u->is_If()) {
                  // Can't break If/Bool/Cmp chain
                  assert(n->is_Bool(), "unexpected If shape");
                  assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
                  assert(n_clone->is_Bool(), "unexpected clone");
                  assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
                  Node* bol_clone = n->clone();
                  Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
                  bol_clone->set_req(1, cmp_clone);

                  Node* nn = stack.node_at(stack.size()-3);
                  Node* nn_clone = clones.at(clones.size()-3);
                  assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");

                  int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
                                                  &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");

                  phase->register_new_node(bol_clone, u->in(0));
                  phase->register_new_node(cmp_clone, u->in(0));

                  phase->igvn().replace_input_of(u, 1, bol_clone);

                } else {
                  phase->igvn().rehash_node_delayed(u);
                  int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");
                }
                replaced = true;
              }
            }
            if (!replaced) {
              stack.set_index(idx+1);
            }
          }
        } else {
          stack.pop();
          clones.pop();
        }
      } while (stack.size() > 0);
      assert(stack.size() == 0 && clones.size() == 0, "");
    }
  }

  // Controls may have moved above: re-check for strip-mined loop heads.
  for (int i = 0; i < state->load_reference_barriers_count(); i++) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    Node* ctrl = phase->get_ctrl(lrb);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* head = loop->head();
    if (head->is_OuterStripMinedLoop()) {
      // Expanding a barrier here will break loop strip mining
      // verification. Transform the loop so the loop nest doesn't
      // appear as strip mined.
      OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
      hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
    }
  }

  // Expand load-reference-barriers
  MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
  Unique_Node_List uses_to_ignore;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    uint last = phase->C->unique();
    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    Node* orig_ctrl = ctrl;

    Node* raw_mem = fixer.find_mem(ctrl, lrb);
    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);

    IdealLoopTree *loop = phase->get_loop(ctrl);

    Node* heap_stable_ctrl = nullptr;
    Node* null_ctrl = nullptr;

    assert(val->bottom_type()->make_oopptr(), "need oop");
    assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");

    enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());

    // Stable path.
    int flags = ShenandoahHeap::HAS_FORWARDED;
    if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
      flags |= ShenandoahHeap::WEAK_ROOTS;
    }
    test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
    IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();

    // Heap stable case
    region->init_req(_heap_stable, heap_stable_ctrl);
    val_phi->init_req(_heap_stable, val);

    // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
    // even for non-cset objects to prevent resurrection of such objects.
    // Wires !in_cset(obj) to slot 2 of region and phis
    Node* not_cset_ctrl = nullptr;
    if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
      test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
    }
    if (not_cset_ctrl != nullptr) {
      region->init_req(_not_cset, not_cset_ctrl);
      val_phi->init_req(_not_cset, val);
    } else {
      region->del_req(_not_cset);
      val_phi->del_req(_not_cset);
    }

    // Resolve object when orig-value is in cset.
    // Make the unconditional resolve for fwdptr.

    // Call lrb-stub and wire up that path in slots 4
    Node* result_mem = nullptr;

    Node* addr;
    {
      VectorSet visited;
      addr = get_load_addr(phase, visited, lrb);
    }
    if (addr->Opcode() == Op_AddP) {
      // Rebase the load address on a control-pinned cast of the base so the
      // address computation can't float above the expanded barrier.
      Node* orig_base = addr->in(AddPNode::Base);
      Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
      phase->register_new_node(base, ctrl);
      if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
        // Field access
        addr = addr->clone();
        addr->set_req(AddPNode::Base, base);
        addr->set_req(AddPNode::Address, base);
        phase->register_new_node(addr, ctrl);
      } else {
        Node* addr2 = addr->in(AddPNode::Address);
        if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
              addr2->in(AddPNode::Base) == orig_base) {
          // Array element access: rebase both AddP levels.
          addr2 = addr2->clone();
          addr2->set_req(AddPNode::Base, base);
          addr2->set_req(AddPNode::Address, base);
          phase->register_new_node(addr2, ctrl);
          addr = addr->clone();
          addr->set_req(AddPNode::Base, base);
          addr->set_req(AddPNode::Address, addr2);
          phase->register_new_node(addr, ctrl);
        }
      }
    }
    call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
    region->init_req(_evac_path, ctrl);
    val_phi->init_req(_evac_path, val);

    phase->register_control(region, loop, heap_stable_iff);
    Node* out_val = val_phi;
    phase->register_new_node(val_phi, region);

    fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);

    ctrl = orig_ctrl;

    phase->igvn().replace_node(lrb, out_val);

    follow_barrier_uses(out_val, ctrl, uses, phase);

    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == ctrl, "bad control");
      assert(n != raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, ctrl, uses, phase);
    }
    fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
  }
  // Done expanding load-reference-barriers.
  assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");

  // Expand IU (SATB pre-) barriers.
  for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
    Node* barrier = state->iu_barrier(i);
    Node* pre_val = barrier->in(1);

    if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
      ShouldNotReachHere();
      continue;
    }

    Node* ctrl = phase->get_ctrl(barrier);

    // Hoist the barrier above a dominating call so the new control flow is
    // not inserted between a call and its projections.
    if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
      ctrl = ctrl->in(0)->in(0);
      phase->set_ctrl(barrier, ctrl);
    } else if (ctrl->is_CallRuntime()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
      ctrl = ctrl->in(0);
      phase->set_ctrl(barrier, ctrl);
    }

    Node* init_ctrl = ctrl;
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* raw_mem = fixer.find_mem(ctrl, barrier);
    Node* init_raw_mem = raw_mem;
    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
    Node* heap_stable_ctrl = nullptr;
    Node* null_ctrl = nullptr;
    uint last = phase->C->unique();

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
    Node* region2 = new RegionNode(PATH_LIMIT2);
    Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    // Stable path.
    test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
    region->init_req(_heap_stable, heap_stable_ctrl);
    phi->init_req(_heap_stable, raw_mem);

    // Null path
    Node* reg2_ctrl = nullptr;
    test_null(ctrl, pre_val, null_ctrl, phase);
    if (null_ctrl != nullptr) {
      reg2_ctrl = null_ctrl->in(0);
      region2->init_req(_null_path, null_ctrl);
      phi2->init_req(_null_path, raw_mem);
    } else {
      region2->del_req(_null_path);
      phi2->del_req(_null_path);
    }

    // SATB queue fast path: if the thread-local queue is not full
    // (index != 0), store pre_val at buffer[--index].
    const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
    const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
    Node* thread = new ThreadLocalNode();
    phase->register_new_node(thread, ctrl);
    Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
    phase->register_new_node(buffer_adr, ctrl);
    Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
    phase->register_new_node(index_adr, ctrl);

    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    const TypePtr* adr_type = TypeRawPtr::BOTTOM;
    Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
    phase->register_new_node(index, ctrl);
    Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
    phase->register_new_node(index_cmp, ctrl);
    Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
    phase->register_new_node(index_test, ctrl);
    IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
    phase->register_control(queue_full_iff, loop, ctrl);
    Node* not_full = new IfTrueNode(queue_full_iff);
    phase->register_control(not_full, loop, queue_full_iff);
    Node* full = new IfFalseNode(queue_full_iff);
    phase->register_control(full, loop, queue_full_iff);

    ctrl = not_full;

    Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
    phase->register_new_node(next_index, ctrl);

    Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
    phase->register_new_node(buffer, ctrl);
    Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
    phase->register_new_node(log_addr, ctrl);
    Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
    phase->register_new_node(log_store, ctrl);
    // update the index
    Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
    phase->register_new_node(index_update, ctrl);

    // Fast-path case
    region2->init_req(_fast_path, ctrl);
    phi2->init_req(_fast_path, index_update);

    ctrl = full;

    // Slow path: queue full, call into the runtime. The call needs a full
    // bottom memory state, with the raw slice replaced by our raw_mem.
    Node* base = find_bottom_mem(ctrl, phase);

    MergeMemNode* mm = MergeMemNode::make(base);
    mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
    phase->register_new_node(mm, ctrl);

    Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
    call->init_req(TypeFunc::Control, ctrl);
    call->init_req(TypeFunc::I_O, phase->C->top());
    call->init_req(TypeFunc::Memory, mm);
    call->init_req(TypeFunc::FramePtr, phase->C->top());
    call->init_req(TypeFunc::ReturnAdr, phase->C->top());
    call->init_req(TypeFunc::Parms, pre_val);
    call->init_req(TypeFunc::Parms+1, thread);
    phase->register_control(call, loop, ctrl);

    Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
    phase->register_control(ctrl_proj, loop, call);
    Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
    phase->register_new_node(mem_proj, call);

    // Slow-path case
    region2->init_req(_slow_path, ctrl_proj);
    phi2->init_req(_slow_path, mem_proj);

    phase->register_control(region2, loop, reg2_ctrl);
    phase->register_new_node(phi2, region2);

    region->init_req(_heap_unstable, region2);
    phi->init_req(_heap_unstable, phi2);

    phase->register_control(region, loop, heap_stable_ctrl->in(0));
    phase->register_new_node(phi, region);

    fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == init_ctrl, "bad control");
      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, init_ctrl, uses, phase);
    }
    fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);

    phase->igvn().replace_node(barrier, pre_val);
  }
  assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");

}

// Best-effort recovery of the address the LRB's value was loaded from, so
// the runtime stub can patch the original location. Returns nullptr when a
// node was already visited (cycle through Phis/CMoves), and a null constant
// (zerocon(T_OBJECT)) as the "unknown address" sentinel.
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    return nullptr;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      // Look through pointer casts/compressions.
      return get_load_addr(phase, visited, in->in(1));
    case Op_LoadN:
    case Op_LoadP:
      return in->in(MemNode::Address);
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      // Those instructions would just have stored a different
      // value into the field. No use to attempt to fix it at this point.
      return phase->igvn().zerocon(T_OBJECT);
    case Op_CMoveP:
    case Op_CMoveN: {
      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
      // Handle unambiguous cases: single address reported on both branches.
      if (t != nullptr && f == nullptr) return t;
      if (t == nullptr && f != nullptr) return f;
      if (t != nullptr && t == f) return t;
      // Ambiguity.
      return phase->igvn().zerocon(T_OBJECT);
    }
    case Op_Phi: {
      // All inputs must agree on a single address, otherwise give up.
      Node* addr = nullptr;
      for (uint i = 1; i < in->req(); i++) {
        Node* addr1 = get_load_addr(phase, visited, in->in(i));
        if (addr == nullptr) {
          addr = addr1;
        }
        if (addr != addr1) {
          return phase->igvn().zerocon(T_OBJECT);
        }
      }
      return addr;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
    case Op_ShenandoahIUBarrier:
      return get_load_addr(phase, visited, in->in(1));
    case Op_CallDynamicJava:
    case Op_CallLeaf:
    case Op_CallStaticJava:
    case Op_ConN:
    case Op_ConP:
    case Op_Parm:
    case Op_CreateEx:
      // Value doesn't come from a memory location we could patch.
      return phase->igvn().zerocon(T_OBJECT);
    default:
#ifdef ASSERT
      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
#endif
      return phase->igvn().zerocon(T_OBJECT);
  }

}

// If the gc-state load feeding the loop-invariant test 'iff' is not computed
// before the loop, clone the load/and/cmp/bool chain at the loop entry
// (using a memory state that dominates the loop head) and rewire the If to
// the hoisted copy.
void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(iff);
  Node* loop_head = loop->_head;
  Node* entry_c = loop_head->in(LoopNode::EntryControl);

  // Expected shape: If(Bool(Cmp(And(Load gc_state, flags), 0))).
  Node* bol = iff->in(1);
  Node* cmp = bol->in(1);
  Node* andi = cmp->in(1);
  Node* load = andi->in(1);

  assert(is_gc_state_load(load), "broken");
  if (!phase->is_dominator(load->in(0), entry_c)) {
    Node* mem_ctrl = nullptr;
    Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
    load = load->clone();
    load->set_req(MemNode::Memory, mem);
    load->set_req(0, entry_c);
    phase->register_new_node(load, entry_c);
    andi = andi->clone();
    andi->set_req(1, load);
    phase->register_new_node(andi, entry_c);
    cmp = cmp->clone();
    cmp->set_req(1, andi);
    phase->register_new_node(cmp, entry_c);
    bol = bol->clone();
    bol->set_req(1, cmp);
    phase->register_new_node(bol, entry_c);

    phase->igvn().replace_input_of(iff, 1, bol);
  }
}

// Returns true when 'n' is a heap-stable test immediately dominated by an
// identical heap-stable test, with every input of n's Region reached through
// one of the dominating If's two projections — the precondition for merging
// the two tests.
bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
  if (!n->is_If() || n->is_CountedLoopEnd()) {
    return false;
  }
  Node* region = n->in(0);

  if (!region->is_Region()) {
    return false;
  }
  Node* dom = phase->idom(region);
  if (!dom->is_If()) {
    return false;
  }

  if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
    return false;
  }

  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  for (uint i = 1; i < region->req(); i++) {
    if (phase->is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (phase->is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}

// Merge a heap-stable test with an identical dominating test so the second
// check can be split through the Region and eliminated. First makes both
// tests use the same gc-state load, then builds a Phi of known condition
// outcomes per region input. (Continues past the visible end of this chunk.)
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Canonicalize on the dominating test's gc-state load.
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node* proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1755 for (uint i = 1; i < n_ctrl->req(); i++) { 1756 if (phase->is_dominator(proj_true, n_ctrl->in(i))) { 1757 bolphi->init_req(i, con_true); 1758 } else { 1759 assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if"); 1760 bolphi->init_req(i, con_false); 1761 } 1762 } 1763 phase->register_new_node(bolphi, n_ctrl); 1764 phase->igvn().replace_input_of(n, 1, bolphi); 1765 phase->do_split_if(n); 1766 } 1767 } 1768 } 1769 1770 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) { 1771 // Find first invariant test that doesn't exit the loop 1772 LoopNode *head = loop->_head->as_Loop(); 1773 IfNode* unswitch_iff = nullptr; 1774 Node* n = head->in(LoopNode::LoopBackControl); 1775 int loop_has_sfpts = -1; 1776 while (n != head) { 1777 Node* n_dom = phase->idom(n); 1778 if (n->is_Region()) { 1779 if (n_dom->is_If()) { 1780 IfNode* iff = n_dom->as_If(); 1781 if (iff->in(1)->is_Bool()) { 1782 BoolNode* bol = iff->in(1)->as_Bool(); 1783 if (bol->in(1)->is_Cmp()) { 1784 // If condition is invariant and not a loop exit, 1785 // then found reason to unswitch. 
1786 if (is_heap_stable_test(iff) && 1787 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) { 1788 assert(!loop->is_loop_exit(iff), "both branches should be in the loop"); 1789 if (loop_has_sfpts == -1) { 1790 for(uint i = 0; i < loop->_body.size(); i++) { 1791 Node *m = loop->_body[i]; 1792 if (m->is_SafePoint() && !m->is_CallLeaf()) { 1793 loop_has_sfpts = 1; 1794 break; 1795 } 1796 } 1797 if (loop_has_sfpts == -1) { 1798 loop_has_sfpts = 0; 1799 } 1800 } 1801 if (!loop_has_sfpts) { 1802 unswitch_iff = iff; 1803 } 1804 } 1805 } 1806 } 1807 } 1808 } 1809 n = n_dom; 1810 } 1811 return unswitch_iff; 1812 } 1813 1814 1815 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) { 1816 Node_List heap_stable_tests; 1817 stack.push(phase->C->start(), 0); 1818 do { 1819 Node* n = stack.node(); 1820 uint i = stack.index(); 1821 1822 if (i < n->outcnt()) { 1823 Node* u = n->raw_out(i); 1824 stack.set_index(i+1); 1825 if (!visited.test_set(u->_idx)) { 1826 stack.push(u, 0); 1827 } 1828 } else { 1829 stack.pop(); 1830 if (n->is_If() && is_heap_stable_test(n)) { 1831 heap_stable_tests.push(n); 1832 } 1833 } 1834 } while (stack.size() > 0); 1835 1836 for (uint i = 0; i < heap_stable_tests.size(); i++) { 1837 Node* n = heap_stable_tests.at(i); 1838 assert(is_heap_stable_test(n), "only evacuation test"); 1839 merge_back_to_back_tests(n, phase); 1840 } 1841 1842 if (!phase->C->major_progress()) { 1843 VectorSet seen; 1844 for (uint i = 0; i < heap_stable_tests.size(); i++) { 1845 Node* n = heap_stable_tests.at(i); 1846 IdealLoopTree* loop = phase->get_loop(n); 1847 if (loop != phase->ltree_root() && 1848 loop->_child == nullptr && 1849 !loop->_irreducible) { 1850 Node* head = loop->_head; 1851 if (head->is_Loop() && 1852 (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) && 1853 !seen.test_set(head->_idx)) { 1854 IfNode* iff = 
find_unswitching_candidate(loop, phase); 1855 if (iff != nullptr) { 1856 Node* bol = iff->in(1); 1857 if (head->as_Loop()->is_strip_mined()) { 1858 head->as_Loop()->verify_strip_mined(0); 1859 } 1860 move_gc_state_test_out_of_loop(iff, phase); 1861 1862 AutoNodeBudget node_budget(phase); 1863 1864 if (loop->policy_unswitching(phase)) { 1865 if (head->as_Loop()->is_strip_mined()) { 1866 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop(); 1867 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase); 1868 } 1869 phase->do_unswitching(loop, old_new); 1870 } else { 1871 // Not proceeding with unswitching. Move load back in 1872 // the loop. 1873 phase->igvn().replace_input_of(iff, 1, bol); 1874 } 1875 } 1876 } 1877 } 1878 } 1879 } 1880 } 1881 1882 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) { 1883 ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this); 1884 } 1885 1886 const Type* ShenandoahIUBarrierNode::bottom_type() const { 1887 if (in(1) == nullptr || in(1)->is_top()) { 1888 return Type::TOP; 1889 } 1890 const Type* t = in(1)->bottom_type(); 1891 if (t == TypePtr::NULL_PTR) { 1892 return t; 1893 } 1894 return t->is_oopptr(); 1895 } 1896 1897 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const { 1898 if (in(1) == nullptr) { 1899 return Type::TOP; 1900 } 1901 const Type* t = phase->type(in(1)); 1902 if (t == Type::TOP) { 1903 return Type::TOP; 1904 } 1905 if (t == TypePtr::NULL_PTR) { 1906 return t; 1907 } 1908 return t->is_oopptr(); 1909 } 1910 1911 int ShenandoahIUBarrierNode::needed(Node* n) { 1912 if (n == nullptr || 1913 n->is_Allocate() || 1914 n->Opcode() == Op_ShenandoahIUBarrier || 1915 n->bottom_type() == TypePtr::NULL_PTR || 1916 (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) { 1917 return NotNeeded; 1918 } 1919 if (n->is_Phi() || 1920 n->is_CMove()) { 1921 return MaybeNeeded; 1922 } 1923 return Needed; 1924 } 1925 1926 
Node* ShenandoahIUBarrierNode::next(Node* n) { 1927 for (;;) { 1928 if (n == nullptr) { 1929 return n; 1930 } else if (n->bottom_type() == TypePtr::NULL_PTR) { 1931 return n; 1932 } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) { 1933 return n; 1934 } else if (n->is_ConstraintCast() || 1935 n->Opcode() == Op_DecodeN || 1936 n->Opcode() == Op_EncodeP) { 1937 n = n->in(1); 1938 } else if (n->is_Proj()) { 1939 n = n->in(0); 1940 } else { 1941 return n; 1942 } 1943 } 1944 ShouldNotReachHere(); 1945 return nullptr; 1946 } 1947 1948 Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) { 1949 PhaseIterGVN* igvn = phase->is_IterGVN(); 1950 1951 Node* n = next(in(1)); 1952 1953 int cont = needed(n); 1954 1955 if (cont == NotNeeded) { 1956 return in(1); 1957 } else if (cont == MaybeNeeded) { 1958 if (igvn == nullptr) { 1959 phase->record_for_igvn(this); 1960 return this; 1961 } else { 1962 ResourceMark rm; 1963 Unique_Node_List wq; 1964 uint wq_i = 0; 1965 1966 for (;;) { 1967 if (n->is_Phi()) { 1968 for (uint i = 1; i < n->req(); i++) { 1969 Node* m = n->in(i); 1970 if (m != nullptr) { 1971 wq.push(m); 1972 } 1973 } 1974 } else { 1975 assert(n->is_CMove(), "nothing else here"); 1976 Node* m = n->in(CMoveNode::IfFalse); 1977 wq.push(m); 1978 m = n->in(CMoveNode::IfTrue); 1979 wq.push(m); 1980 } 1981 Node* orig_n = nullptr; 1982 do { 1983 if (wq_i >= wq.size()) { 1984 return in(1); 1985 } 1986 n = wq.at(wq_i); 1987 wq_i++; 1988 orig_n = n; 1989 n = next(n); 1990 cont = needed(n); 1991 if (cont == Needed) { 1992 return this; 1993 } 1994 } while (cont != MaybeNeeded || (orig_n != n && wq.member(n))); 1995 } 1996 } 1997 } 1998 1999 return this; 2000 } 2001 2002 #ifdef ASSERT 2003 static bool has_never_branch(Node* root) { 2004 for (uint i = 1; i < root->req(); i++) { 2005 Node* in = root->in(i); 2006 if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) { 
2007 return true; 2008 } 2009 } 2010 return false; 2011 } 2012 #endif 2013 2014 void MemoryGraphFixer::collect_memory_nodes() { 2015 Node_Stack stack(0); 2016 VectorSet visited; 2017 Node_List regions; 2018 2019 // Walk the raw memory graph and create a mapping from CFG node to 2020 // memory node. Exclude phis for now. 2021 stack.push(_phase->C->root(), 1); 2022 do { 2023 Node* n = stack.node(); 2024 int opc = n->Opcode(); 2025 uint i = stack.index(); 2026 if (i < n->req()) { 2027 Node* mem = nullptr; 2028 if (opc == Op_Root) { 2029 Node* in = n->in(i); 2030 int in_opc = in->Opcode(); 2031 if (in_opc == Op_Return || in_opc == Op_Rethrow) { 2032 mem = in->in(TypeFunc::Memory); 2033 } else if (in_opc == Op_Halt) { 2034 if (in->in(0)->is_Region()) { 2035 Node* r = in->in(0); 2036 for (uint j = 1; j < r->req(); j++) { 2037 assert(!r->in(j)->is_NeverBranch(), ""); 2038 } 2039 } else { 2040 Node* proj = in->in(0); 2041 assert(proj->is_Proj(), ""); 2042 Node* in = proj->in(0); 2043 assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), ""); 2044 if (in->is_CallStaticJava()) { 2045 mem = in->in(TypeFunc::Memory); 2046 } else if (in->Opcode() == Op_Catch) { 2047 Node* call = in->in(0)->in(0); 2048 assert(call->is_Call(), ""); 2049 mem = call->in(TypeFunc::Memory); 2050 } else if (in->is_NeverBranch()) { 2051 mem = collect_memory_for_infinite_loop(in); 2052 } 2053 } 2054 } else { 2055 #ifdef ASSERT 2056 n->dump(); 2057 in->dump(); 2058 #endif 2059 ShouldNotReachHere(); 2060 } 2061 } else { 2062 assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, ""); 2063 assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, ""); 2064 mem = n->in(i); 2065 } 2066 i++; 2067 stack.set_index(i); 2068 if (mem == nullptr) { 2069 continue; 2070 } 2071 for (;;) { 2072 if (visited.test_set(mem->_idx) || mem->is_Start()) { 2073 break; 2074 } 2075 if (mem->is_Phi()) { 2076 stack.push(mem, 2); 2077 mem = 
mem->in(1); 2078 } else if (mem->is_Proj()) { 2079 stack.push(mem, mem->req()); 2080 mem = mem->in(0); 2081 } else if (mem->is_SafePoint() || mem->is_MemBar()) { 2082 mem = mem->in(TypeFunc::Memory); 2083 } else if (mem->is_MergeMem()) { 2084 MergeMemNode* mm = mem->as_MergeMem(); 2085 mem = mm->memory_at(_alias); 2086 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { 2087 assert(_alias == Compile::AliasIdxRaw, ""); 2088 stack.push(mem, mem->req()); 2089 mem = mem->in(MemNode::Memory); 2090 } else { 2091 #ifdef ASSERT 2092 mem->dump(); 2093 #endif 2094 ShouldNotReachHere(); 2095 } 2096 } 2097 } else { 2098 if (n->is_Phi()) { 2099 // Nothing 2100 } else if (!n->is_Root()) { 2101 Node* c = get_ctrl(n); 2102 _memory_nodes.map(c->_idx, n); 2103 } 2104 stack.pop(); 2105 } 2106 } while(stack.is_nonempty()); 2107 2108 // Iterate over CFG nodes in rpo and propagate memory state to 2109 // compute memory state at regions, creating new phis if needed. 2110 Node_List rpo_list; 2111 visited.clear(); 2112 _phase->rpo(_phase->C->root(), stack, visited, rpo_list); 2113 Node* root = rpo_list.pop(); 2114 assert(root == _phase->C->root(), ""); 2115 2116 const bool trace = false; 2117 #ifdef ASSERT 2118 if (trace) { 2119 for (int i = rpo_list.size() - 1; i >= 0; i--) { 2120 Node* c = rpo_list.at(i); 2121 if (_memory_nodes[c->_idx] != nullptr) { 2122 tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); 2123 } 2124 } 2125 } 2126 #endif 2127 uint last = _phase->C->unique(); 2128 2129 #ifdef ASSERT 2130 uint16_t max_depth = 0; 2131 for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) { 2132 IdealLoopTree* lpt = iter.current(); 2133 max_depth = MAX2(max_depth, lpt->_nest); 2134 } 2135 #endif 2136 2137 bool progress = true; 2138 int iteration = 0; 2139 Node_List dead_phis; 2140 while (progress) { 2141 progress = false; 2142 iteration++; 2143 assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || 
has_never_branch(_phase->C->root()), ""); 2144 if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); } 2145 2146 for (int i = rpo_list.size() - 1; i >= 0; i--) { 2147 Node* c = rpo_list.at(i); 2148 2149 Node* prev_mem = _memory_nodes[c->_idx]; 2150 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { 2151 Node* prev_region = regions[c->_idx]; 2152 Node* unique = nullptr; 2153 for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) { 2154 Node* m = _memory_nodes[c->in(j)->_idx]; 2155 assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state"); 2156 if (m != nullptr) { 2157 if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) { 2158 assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), ""); 2159 // continue 2160 } else if (unique == nullptr) { 2161 unique = m; 2162 } else if (m == unique) { 2163 // continue 2164 } else { 2165 unique = NodeSentinel; 2166 } 2167 } 2168 } 2169 assert(unique != nullptr, "empty phi???"); 2170 if (unique != NodeSentinel) { 2171 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) { 2172 dead_phis.push(prev_region); 2173 } 2174 regions.map(c->_idx, unique); 2175 } else { 2176 Node* phi = nullptr; 2177 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) { 2178 phi = prev_region; 2179 for (uint k = 1; k < c->req(); k++) { 2180 Node* m = _memory_nodes[c->in(k)->_idx]; 2181 assert(m != nullptr, "expect memory state"); 2182 phi->set_req(k, m); 2183 } 2184 } else { 2185 for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) { 2186 Node* u = c->fast_out(j); 2187 if (u->is_Phi() && u->bottom_type() == Type::MEMORY && 
2188 (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) { 2189 phi = u; 2190 for (uint k = 1; k < c->req() && phi != nullptr; k++) { 2191 Node* m = _memory_nodes[c->in(k)->_idx]; 2192 assert(m != nullptr, "expect memory state"); 2193 if (u->in(k) != m) { 2194 phi = NodeSentinel; 2195 } 2196 } 2197 } 2198 } 2199 if (phi == NodeSentinel) { 2200 phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias)); 2201 for (uint k = 1; k < c->req(); k++) { 2202 Node* m = _memory_nodes[c->in(k)->_idx]; 2203 assert(m != nullptr, "expect memory state"); 2204 phi->init_req(k, m); 2205 } 2206 } 2207 } 2208 if (phi != nullptr) { 2209 regions.map(c->_idx, phi); 2210 } else { 2211 assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state"); 2212 } 2213 } 2214 Node* current_region = regions[c->_idx]; 2215 if (current_region != prev_region) { 2216 progress = true; 2217 if (prev_region == prev_mem) { 2218 _memory_nodes.map(c->_idx, current_region); 2219 } 2220 } 2221 } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) { 2222 Node* m = _memory_nodes[_phase->idom(c)->_idx]; 2223 assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state"); 2224 if (m != prev_mem) { 2225 _memory_nodes.map(c->_idx, m); 2226 progress = true; 2227 } 2228 } 2229 #ifdef ASSERT 2230 if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); } 2231 #endif 2232 } 2233 } 2234 2235 // Replace existing phi with computed memory state for that region 2236 // if different (could be a new phi or a dominating memory node if 2237 // that phi was found to be useless). 
2238 while (dead_phis.size() > 0) { 2239 Node* n = dead_phis.pop(); 2240 n->replace_by(_phase->C->top()); 2241 n->destruct(&_phase->igvn()); 2242 } 2243 for (int i = rpo_list.size() - 1; i >= 0; i--) { 2244 Node* c = rpo_list.at(i); 2245 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { 2246 Node* n = regions[c->_idx]; 2247 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state"); 2248 if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) { 2249 _phase->register_new_node(n, c); 2250 } 2251 } 2252 } 2253 for (int i = rpo_list.size() - 1; i >= 0; i--) { 2254 Node* c = rpo_list.at(i); 2255 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) { 2256 Node* n = regions[c->_idx]; 2257 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state"); 2258 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) { 2259 Node* u = c->fast_out(i); 2260 if (u->is_Phi() && u->bottom_type() == Type::MEMORY && 2261 u != n) { 2262 assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state"); 2263 if (u->adr_type() == TypePtr::BOTTOM) { 2264 fix_memory_uses(u, n, n, c); 2265 } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { 2266 _phase->lazy_replace(u, n); 2267 --i; --imax; 2268 } 2269 } 2270 } 2271 } 2272 } 2273 } 2274 2275 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) { 2276 Node* mem = nullptr; 2277 Node* head = in->in(0); 2278 assert(head->is_Region(), "unexpected infinite loop graph shape"); 2279 2280 Node* phi_mem = nullptr; 2281 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 2282 Node* u = head->fast_out(j); 2283 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { 2284 if (_phase->C->get_alias_index(u->adr_type()) == _alias) { 2285 assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, ""); 2286 phi_mem = u; 2287 } else if (u->adr_type() == TypePtr::BOTTOM) { 
2288 assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, ""); 2289 if (phi_mem == nullptr) { 2290 phi_mem = u; 2291 } 2292 } 2293 } 2294 } 2295 if (phi_mem == nullptr) { 2296 ResourceMark rm; 2297 Node_Stack stack(0); 2298 stack.push(head, 1); 2299 do { 2300 Node* n = stack.node(); 2301 uint i = stack.index(); 2302 if (i >= n->req()) { 2303 stack.pop(); 2304 } else { 2305 stack.set_index(i + 1); 2306 Node* c = n->in(i); 2307 assert(c != head, "should have found a safepoint on the way"); 2308 if (stack.size() != 1 || _phase->is_dominator(head, c)) { 2309 for (;;) { 2310 if (c->is_Region()) { 2311 stack.push(c, 1); 2312 break; 2313 } else if (c->is_SafePoint() && !c->is_CallLeaf()) { 2314 Node* m = c->in(TypeFunc::Memory); 2315 if (m->is_MergeMem()) { 2316 m = m->as_MergeMem()->memory_at(_alias); 2317 } 2318 assert(mem == nullptr || mem == m, "several memory states"); 2319 mem = m; 2320 break; 2321 } else { 2322 assert(c != c->in(0), ""); 2323 c = c->in(0); 2324 } 2325 } 2326 } 2327 } 2328 } while (stack.size() > 0); 2329 assert(mem != nullptr, "should have found safepoint"); 2330 } else { 2331 mem = phi_mem; 2332 } 2333 return mem; 2334 } 2335 2336 Node* MemoryGraphFixer::get_ctrl(Node* n) const { 2337 Node* c = _phase->get_ctrl(n); 2338 if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) { 2339 assert(c == n->in(0), ""); 2340 CallNode* call = c->as_Call(); 2341 CallProjections* projs = call->extract_projections(true, false); 2342 if (projs->catchall_memproj != nullptr) { 2343 if (projs->fallthrough_memproj == n) { 2344 c = projs->fallthrough_catchproj; 2345 } else { 2346 assert(projs->catchall_memproj == n, ""); 2347 c = projs->catchall_catchproj; 2348 } 2349 } 2350 } 2351 return c; 2352 } 2353 2354 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const { 2355 if (_phase->has_ctrl(n)) 2356 return get_ctrl(n); 2357 else { 2358 assert (n->is_CFG(), "must be a CFG node"); 2359 return n; 2360 } 2361 } 2362 2363 bool 
MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const { 2364 return m != nullptr && get_ctrl(m) == c; 2365 } 2366 2367 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const { 2368 assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, ""); 2369 assert(!ctrl->is_Call() || ctrl == n, "projection expected"); 2370 #ifdef ASSERT 2371 if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) || 2372 (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) { 2373 CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call(); 2374 int mems = 0; 2375 for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { 2376 Node* u = call->fast_out(i); 2377 if (u->bottom_type() == Type::MEMORY) { 2378 mems++; 2379 } 2380 } 2381 assert(mems <= 1, "No node right after call if multiple mem projections"); 2382 } 2383 #endif 2384 Node* mem = _memory_nodes[ctrl->_idx]; 2385 Node* c = ctrl; 2386 while (!mem_is_valid(mem, c) && 2387 (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) { 2388 c = _phase->idom(c); 2389 mem = _memory_nodes[c->_idx]; 2390 } 2391 if (n != nullptr && mem_is_valid(mem, c)) { 2392 while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) { 2393 mem = next_mem(mem, _alias); 2394 } 2395 if (mem->is_MergeMem()) { 2396 mem = mem->as_MergeMem()->memory_at(_alias); 2397 } 2398 if (!mem_is_valid(mem, c)) { 2399 do { 2400 c = _phase->idom(c); 2401 mem = _memory_nodes[c->_idx]; 2402 } while (!mem_is_valid(mem, c) && 2403 (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))); 2404 } 2405 } 2406 assert(mem->bottom_type() == Type::MEMORY, ""); 2407 return mem; 2408 } 2409 2410 bool MemoryGraphFixer::has_mem_phi(Node* region) const { 2411 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2412 Node* use = region->fast_out(i); 2413 if (use->is_Phi() && use->bottom_type() == Type::MEMORY && 2414 
(_phase->C->get_alias_index(use->adr_type()) == _alias)) { 2415 return true; 2416 } 2417 } 2418 return false; 2419 } 2420 2421 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) { 2422 assert(_phase->ctrl_or_self(new_mem) == new_ctrl, ""); 2423 const bool trace = false; 2424 DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); }); 2425 DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); }); 2426 GrowableArray<Node*> phis; 2427 if (mem_for_ctrl != mem) { 2428 Node* old = mem_for_ctrl; 2429 Node* prev = nullptr; 2430 while (old != mem) { 2431 prev = old; 2432 if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) { 2433 assert(_alias == Compile::AliasIdxRaw, ""); 2434 old = old->in(MemNode::Memory); 2435 } else if (old->Opcode() == Op_SCMemProj) { 2436 assert(_alias == Compile::AliasIdxRaw, ""); 2437 old = old->in(0); 2438 } else { 2439 ShouldNotReachHere(); 2440 } 2441 } 2442 assert(prev != nullptr, ""); 2443 if (new_ctrl != ctrl) { 2444 _memory_nodes.map(ctrl->_idx, mem); 2445 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl); 2446 } 2447 uint input = (uint)MemNode::Memory; 2448 _phase->igvn().replace_input_of(prev, input, new_mem); 2449 } else { 2450 uses.clear(); 2451 _memory_nodes.map(new_ctrl->_idx, new_mem); 2452 uses.push(new_ctrl); 2453 for(uint next = 0; next < uses.size(); next++ ) { 2454 Node *n = uses.at(next); 2455 assert(n->is_CFG(), ""); 2456 DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); }); 2457 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2458 Node* u = n->fast_out(i); 2459 if (!u->is_Root() && u->is_CFG() && u != n) { 2460 Node* m = _memory_nodes[u->_idx]; 2461 if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) && 2462 !has_mem_phi(u) && 2463 u->unique_ctrl_out()->Opcode() != Op_Halt) { 2464 DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); }); 2465 DEBUG_ONLY(if (trace 
&& m != nullptr) { tty->print("ZZZ mem"); m->dump(); }); 2466 2467 if (!mem_is_valid(m, u) || !m->is_Phi()) { 2468 bool push = true; 2469 bool create_phi = true; 2470 if (_phase->is_dominator(new_ctrl, u)) { 2471 create_phi = false; 2472 } 2473 if (create_phi) { 2474 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias)); 2475 _phase->register_new_node(phi, u); 2476 phis.push(phi); 2477 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); }); 2478 if (!mem_is_valid(m, u)) { 2479 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); }); 2480 _memory_nodes.map(u->_idx, phi); 2481 } else { 2482 DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); }); 2483 for (;;) { 2484 assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), ""); 2485 Node* next = nullptr; 2486 if (m->is_Proj()) { 2487 next = m->in(0); 2488 } else { 2489 assert(m->is_Mem() || m->is_LoadStore(), ""); 2490 assert(_alias == Compile::AliasIdxRaw, ""); 2491 next = m->in(MemNode::Memory); 2492 } 2493 if (_phase->get_ctrl(next) != u) { 2494 break; 2495 } 2496 if (next->is_MergeMem()) { 2497 assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, ""); 2498 break; 2499 } 2500 if (next->is_Phi()) { 2501 assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, ""); 2502 break; 2503 } 2504 m = next; 2505 } 2506 2507 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); }); 2508 assert(m->is_Mem() || m->is_LoadStore(), ""); 2509 uint input = (uint)MemNode::Memory; 2510 _phase->igvn().replace_input_of(m, input, phi); 2511 push = false; 2512 } 2513 } else { 2514 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); }); 2515 } 2516 if (push) { 2517 uses.push(u); 2518 } 2519 } 2520 } else if (!mem_is_valid(m, u) && 2521 !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) { 2522 uses.push(u); 2523 } 2524 } 2525 } 2526 } 2527 for (int i = 0; i < phis.length(); i++) { 2528 
Node* n = phis.at(i); 2529 Node* r = n->in(0); 2530 DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); }); 2531 for (uint j = 1; j < n->req(); j++) { 2532 Node* m = find_mem(r->in(j), nullptr); 2533 _phase->igvn().replace_input_of(n, j, m); 2534 DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); }); 2535 } 2536 } 2537 } 2538 uint last = _phase->C->unique(); 2539 MergeMemNode* mm = nullptr; 2540 int alias = _alias; 2541 DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); }); 2542 // Process loads first to not miss an anti-dependency: if the memory 2543 // edge of a store is updated before a load is processed then an 2544 // anti-dependency may be missed. 2545 for (DUIterator i = mem->outs(); mem->has_out(i); i++) { 2546 Node* u = mem->out(i); 2547 if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) { 2548 Node* m = find_mem(_phase->get_ctrl(u), u); 2549 if (m != mem) { 2550 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2551 _phase->igvn().replace_input_of(u, MemNode::Memory, m); 2552 --i; 2553 } 2554 } 2555 } 2556 for (DUIterator i = mem->outs(); mem->has_out(i); i++) { 2557 Node* u = mem->out(i); 2558 if (u->_idx < last) { 2559 if (u->is_Mem()) { 2560 if (_phase->C->get_alias_index(u->adr_type()) == alias) { 2561 Node* m = find_mem(_phase->get_ctrl(u), u); 2562 if (m != mem) { 2563 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2564 _phase->igvn().replace_input_of(u, MemNode::Memory, m); 2565 --i; 2566 } 2567 } 2568 } else if (u->is_MergeMem()) { 2569 MergeMemNode* u_mm = u->as_MergeMem(); 2570 if (u_mm->memory_at(alias) == mem) { 2571 MergeMemNode* newmm = nullptr; 2572 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { 2573 Node* uu = u->fast_out(j); 2574 assert(!uu->is_MergeMem(), "chain of MergeMems?"); 2575 if (uu->is_Phi()) { 2576 assert(uu->adr_type() == TypePtr::BOTTOM, ""); 2577 
Node* region = uu->in(0); 2578 int nb = 0; 2579 for (uint k = 1; k < uu->req(); k++) { 2580 if (uu->in(k) == u) { 2581 Node* m = find_mem(region->in(k), nullptr); 2582 if (m != mem) { 2583 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); }); 2584 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); 2585 if (newmm != u) { 2586 _phase->igvn().replace_input_of(uu, k, newmm); 2587 nb++; 2588 --jmax; 2589 } 2590 } 2591 } 2592 } 2593 if (nb > 0) { 2594 --j; 2595 } 2596 } else { 2597 Node* m = find_mem(_phase->ctrl_or_self(uu), uu); 2598 if (m != mem) { 2599 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); }); 2600 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); 2601 if (newmm != u) { 2602 _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); 2603 --j, --jmax; 2604 } 2605 } 2606 } 2607 } 2608 } 2609 } else if (u->is_Phi()) { 2610 assert(u->bottom_type() == Type::MEMORY, "what else?"); 2611 if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) { 2612 Node* region = u->in(0); 2613 bool replaced = false; 2614 for (uint j = 1; j < u->req(); j++) { 2615 if (u->in(j) == mem) { 2616 Node* m = find_mem(region->in(j), nullptr); 2617 Node* nnew = m; 2618 if (m != mem) { 2619 if (u->adr_type() == TypePtr::BOTTOM) { 2620 mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m)); 2621 nnew = mm; 2622 } 2623 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); }); 2624 _phase->igvn().replace_input_of(u, j, nnew); 2625 replaced = true; 2626 } 2627 } 2628 } 2629 if (replaced) { 2630 --i; 2631 } 2632 } 2633 } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || 2634 u->adr_type() == nullptr) { 2635 assert(u->adr_type() != nullptr || 2636 u->Opcode() == Op_Rethrow || 2637 u->Opcode() == Op_Return || 2638 u->Opcode() == Op_SafePoint || 2639 (u->is_CallStaticJava() && 
u->as_CallStaticJava()->uncommon_trap_request() != 0) || 2640 (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || 2641 u->Opcode() == Op_CallLeaf, ""); 2642 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2643 if (m != mem) { 2644 mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m)); 2645 _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); 2646 --i; 2647 } 2648 } else if (_phase->C->get_alias_index(u->adr_type()) == alias) { 2649 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2650 if (m != mem) { 2651 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2652 _phase->igvn().replace_input_of(u, u->find_edge(mem), m); 2653 --i; 2654 } 2655 } else if (u->adr_type() != TypePtr::BOTTOM && 2656 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) { 2657 Node* m = find_mem(_phase->ctrl_or_self(u), u); 2658 assert(m != mem, ""); 2659 // u is on the wrong slice... 2660 assert(u->is_ClearArray(), ""); 2661 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); 2662 _phase->igvn().replace_input_of(u, u->find_edge(mem), m); 2663 --i; 2664 } 2665 } 2666 } 2667 #ifdef ASSERT 2668 assert(new_mem->outcnt() > 0, ""); 2669 for (int i = 0; i < phis.length(); i++) { 2670 Node* n = phis.at(i); 2671 assert(n->outcnt() > 0, "new phi must have uses now"); 2672 } 2673 #endif 2674 } 2675 2676 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) { 2677 if (mem_for_ctrl != mem && new_ctrl != ctrl) { 2678 _memory_nodes.map(ctrl->_idx, mem); 2679 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl); 2680 } 2681 } 2682 2683 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const { 2684 MergeMemNode* mm = MergeMemNode::make(mem); 2685 mm->set_memory_at(_alias, rep_proj); 2686 _phase->register_new_node(mm, rep_ctrl); 2687 return mm; 2688 } 2689 2690 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* 
                                                mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = nullptr;
  MergeMemNode* u_mm = u->as_MergeMem();
  // Place the (possibly cloned) MergeMem at whichever of its current control
  // and rep_ctrl is the deeper of the two.
  Node* c = _phase->get_ctrl(u);
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: safe to mutate u in place instead of cloning.
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i; // the caller iterates over mem's outs; one edge from mem just went away
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    if ((uint)_alias >= u->req()) {
      // u was narrower than _alias; make sure the slice exists on the clone.
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}

// Should phi's inputs be rewired for slice _alias? A phi typed exactly at
// _alias qualifies; a bottom-memory phi qualifies only when its region has no
// sibling memory phi already dedicated to _alias (that one takes precedence).
bool MemoryGraphFixer::should_process_phi(Node* phi) const {
  if (phi->adr_type() == TypePtr::BOTTOM) {
    Node* region = phi->in(0);
    for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
      Node* uu = region->fast_out(j);
      if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
        return false;
      }
    }
    return true;
  }
  return _phase->C->get_alias_index(phi->adr_type()) == _alias;
}

// Rewire the uses of mem on slice _alias that are dominated by rep_ctrl so
// they see rep_proj (wrapped in a MergeMem where the use expects the full
// memory state). Nodes created after entry (u->_idx >= last) are skipped.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node*
                                       replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase->C->unique(); // only process pre-existing nodes
  MergeMemNode* mm = nullptr;      // lazily built full-memory-state wrapper
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          // The MergeMem itself cannot be redirected wholesale: only those of
          // its uses dominated by rep_ctrl should see rep_proj, so clone it
          // lazily and repoint the qualifying uses at the clone.
          MergeMemNode* newmm = nullptr;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                for (uint k = 1; k < uu->req(); k++) {
                  // Dominance is judged per phi input, at the region predecessor.
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == nullptr) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax; // an out-edge of u was removed
                    }
                  }
                }
                if (nb > 0) {
                  --j; // compensate fast_outs iteration for removed edges
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == nullptr) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // Bottom-memory phi needs the full memory state, not one slice;
                // share a single wrapper MergeMem across all such uses.
                if (mm == nullptr) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            --i; // an out-edge of mem was removed; rewind the DUIterator
          }

        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        // Uses consuming the full memory state (returns, safepoints, uncommon
        // traps, rethrows, leaf calls): hand them the wrapper MergeMem.
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "%s", u->Name());
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == nullptr) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        // Use on our slice: give it rep_proj directly.
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}

// Construct a load-reference barrier over obj and register it with the
// barrier-set state so expansion can find it later.
ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
: Node(ctrl, obj), _decorators(decorators) {
  ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
}

DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
  return _decorators;
}

uint ShenandoahLoadReferenceBarrierNode::size_of() const {
  return sizeof(*this);
}

// Only the reference-strength/location decorators participate in GVN
// identity; the rest are irrelevant to the barrier's behavior.
static DecoratorSet mask_decorators(DecoratorSet decorators) {
  return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
}

// GVN hash: the node hash plus the masked decorators, so that barriers with
// different masked decorators never commute (must stay consistent with cmp()).
uint ShenandoahLoadReferenceBarrierNode::hash() const {
  uint hash = Node::hash();
  hash += mask_decorators(_decorators);
  return hash;
}

// GVN equality: same inputs, same opcode, and same masked decorators
// (the complement of hash() above).
bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
  return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
         mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
}

const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
  if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
    return Type::TOP;
  }
  const Type* t = in(ValueIn)->bottom_type();
  if (t == TypePtr::NULL_PTR) {
    return t;
  }

  if (ShenandoahBarrierSet::is_strong_access(decorators())) {
    return t;
  }

  // Non-strong (weak/phantom) accesses may resolve to null even when the
  // input is not null, so widen the type with NULL_PTR.
  return t->meet(TypePtr::NULL_PTR);
}

const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
  // Either input is TOP ==> the result is TOP
  const Type *t2 = phase->type(in(ValueIn));
  if( t2 == Type::TOP ) return Type::TOP;

  if (t2 == TypePtr::NULL_PTR) {
    return t2;
  }

  if (ShenandoahBarrierSet::is_strong_access(decorators())) {
    return t2;
  }

  // Same widening as bottom_type(): non-strong accesses can yield null.
  return t2->meet(TypePtr::NULL_PTR);
}

// Drop the barrier entirely when the wrapped value provably never needs one.
Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
  Node* value = in(ValueIn);
  if (!needs_barrier(phase, value)) {
    return value;
  }
  return this;
}

bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
  Unique_Node_List visited;
  return needs_barrier_impl(phase, n, visited);
}

// Recursive, conservative classifier: true if n may produce a reference that
// requires a load-reference barrier. visited guards against cycles (phis).
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
  if (n == nullptr) return false;
  if (visited.member(n)) {
    return false; // Been there.
  }
  visited.push(n);

  // Freshly allocated objects and call results never need a barrier here.
  if (n->is_Allocate()) {
    // tty->print_cr("optimize barrier on alloc");
    return false;
  }
  if (n->is_Call()) {
    // tty->print_cr("optimize barrier on call");
    return false;
  }

  // Type-based pruning: top, provably-null, and constant oops are safe.
  const Type* type = phase->type(n);
  if (type == Type::TOP) {
    return false;
  }
  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
    // tty->print_cr("optimize barrier on null");
    return false;
  }
  if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
    // tty->print_cr("optimize barrier on constant");
    return false;
  }

  switch (n->Opcode()) {
    case Op_AddP:
      return true; // TODO: Can refine?
    case Op_LoadP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
      // Values loaded or exchanged from the heap may be stale references.
      return true;
    case Op_Phi: {
      // A phi needs a barrier if any of its inputs does.
      for (uint i = 1; i < n->req(); i++) {
        if (needs_barrier_impl(phase, n->in(i), visited)) return true;
      }
      return false;
    }
    case Op_CheckCastPP:
    case Op_CastPP:
      // Casts are transparent: look through to the casted value.
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_Proj:
      return needs_barrier_impl(phase, n->in(0), visited);
    case Op_ShenandoahLoadReferenceBarrier:
      // tty->print_cr("optimize barrier on barrier");
      return false;
    case Op_Parm:
      // tty->print_cr("optimize barrier on input arg");
      return false;
    case Op_DecodeN:
    case Op_EncodeP:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_LoadN:
      return true;
    case Op_CMoveN:
    case Op_CMoveP:
      // Either selected value may flow out of the CMove.
      return needs_barrier_impl(phase, n->in(2), visited) ||
             needs_barrier_impl(phase, n->in(3), visited);
    case Op_ShenandoahIUBarrier:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_CreateEx:
      return false;
    default:
      break;
  }
#ifdef ASSERT
  // Unknown producer of a reference: dump it in debug builds so the case can
  // be classified, then fail fast.
  tty->print("need barrier on?: ");
  tty->print_cr("ins:");
  n->dump(2);
  tty->print_cr("outs:");
  n->dump(-2);
  ShouldNotReachHere();
#endif
  // Product builds: be conservative and keep the barrier.
  return true;
}