1 /* 2 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

// Expand the pending Shenandoah barriers (IU barriers and load-reference
// barriers) if any were recorded by the barrier set.  Expansion is done via a
// dedicated PhaseIdealLoop pass (LoopOptsShenandoahExpand); optionally, one
// more round of loop opts (LoopOptsShenandoahPostExpand) is run afterwards
// when ShenandoahLoopOptsAfterExpansion is set.
// Returns false if compilation failed along the way, true otherwise.
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->iu_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    // Barrier expansion runs after loop opts are normally over; temporarily
    // re-open the loop-opts phase for the expansion passes below.
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    if (attempt_more_loopopts) {
      C->set_major_progress();
      if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
        return false;
      }
      C->clear_major_progress();

      C->process_for_post_loop_opts_igvn(igvn);
    }
    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}

// Recognize the shape of a GC-state test emitted by test_gc_state():
//   If (Bool#ne (CmpI (AndI <gc-state load> mask) 0))
// Returns true only if 'iff' is exactly that pattern for the given mask.
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  // Comparison must be against constant zero.
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  // The AndI must mask with exactly the flags we are looking for.
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

// A "heap stable" test is a GC-state test against the HAS_FORWARDED flag.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

// Recognize a load of the per-thread GC state byte:
//   LoadB/LoadUB (AddP ThreadLocal, gc_state_offset)
bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

// Walk the control graph backwards from 'start' up to the dominating 'stop'
// and report whether any safepoint (excluding leaf calls) lies on a path in
// between.
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      // Follow all region inputs: any incoming path may hold a safepoint.
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
// Debug-only helper for verify(): follow the data flow that produces oop
// value 'in' (through casts, phis, CMoves, encode/decode, etc.) and check
// that every producer is either trivially safe (null, constant, argument,
// fresh allocation, Java call result) or covered by the barrier kind
// expected for use 't'.  Barriers encountered are recorded in
// 'barriers_used'.  Returns false as soon as an uncovered producer is found.
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        // For an oop store we additionally require an IU barrier somewhere
        // on the currently tracked phi path.
        if (t == ShenandoahOopStore) {
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahIUBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        // Remember the IU barrier on the stack so the LRB case above can
        // see it, then keep following the barrier's input.
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          // Visit phi inputs one by one, starting at input 1.
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          // Check the false input now, queue the true input for later.
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    // Current chain fully checked; resume at the next pending phi/CMove input.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

// Dump the offending nodes (with 10 levels of inputs) and abort the VM.
void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != nullptr) {
    n1->dump(+10);
  }
  if (n2 != nullptr) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

// Debug-only whole-graph verification: check that every oop use in the graph
// (loads, stores, compares, atomics, leaf-call arguments, known intrinsics)
// is fed through the Shenandoah barrier that its usage kind requires.
// Calls/intrinsics not covered by the tables below fail hard so new oop uses
// must be audited and added explicitly.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  // NOTE(review): wq is only seeded with root and no statement in this loop
  // visibly pushes further nodes onto it — confirm against upstream whether
  // an input-walking step was dropped from this copy of the file.
  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        // Raw stores into the thread-local SATB mark queue buffer are part
        // of the pre-barrier machinery itself and are exempt.
        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        // Comparisons against constants or against freshly allocated objects
        // do not need barriers on their inputs.
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Table of known leaf-call stubs and, for each, which argument
      // positions carry oops and which barrier kind each one requires
      // (up to 6 entries, -1 terminates the list).
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        // The destination is the second pointer argument of the stub.
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          // Known stub: check the listed oop arguments ...
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          // ... and make sure no oop argument was left out of the table.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown stub: fail if it takes any oop argument at all.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Table of other node opcodes with oop inputs that need barriers
      // (up to 2 entries per opcode, -1 terminates the list).
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        // Known node: check the listed inputs, then make sure no other oop
        // input exists that the table does not cover.
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Unknown node: fail if it has any oop input.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

// Both 'd' and 'n' are pinned at control 'c'.  Decide whether 'd' executes
// before 'n' by proving there is no data path (including anti-dependencies
// from stores to aliasing loads) from 'd' to 'n' within that control.
bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

// Does 'd' (at control 'd_c') dominate 'n' (at control 'n_c')?  Falls back
// to the same-control data-path check when both sit at the same control.
bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

// Step one node backwards through the memory graph for the given alias index.
Node* next_mem(Node* mem, int alias) {
  Node* res = nullptr;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

// Walk up the dominator chain from 'c' to 'dom' and verify the path carries
// no real control-flow branching.  Uncommon-trap projections are tolerated;
// at most one other If projection is tolerated when 'allow_one_proj' is set,
// and that projection is returned.  Returns NodeSentinel if an unsupported
// branch shape is found, nullptr if the path is branch-free.
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect the control subgraph between the region and its idom and
      // make sure no control flow escapes it (except uncommon traps).
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

// Walk the memory graph upwards from 'mem' until reaching a state whose
// control strictly dominates 'ctrl'.  Sets 'mem_ctrl' to that control.
// Returns nullptr if the walk revisits a node (cycle / no such state).
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq;
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return nullptr;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

// Find the memory state for the TypePtr::BOTTOM ("bottom") slice that is
// live at control 'ctrl', walking up the dominator tree as needed: a memory
// Phi on a region, the appropriate memory projection of a call, or the
// memory projection of a safepoint/membar/start.
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              // Pick the memory projection on the path that reaches 'ctrl'.
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == nullptr, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}

// Collect the non-CFG uses of 'n' that are pinned at 'ctrl' (excluding the
// loop-phi backedge use) into 'uses'.
void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

// Turn a strip-mined loop back into a plain loop: replace the outer strip-
// mined loop head with a regular LoopNode, its loop end with a regular
// IfNode, and clear the inner loop's strip-mined flag.
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

// Emit a test of the thread-local GC state byte against 'flags'.  On return,
// 'ctrl' is the (unlikely) path where any of the flags is set and
// 'test_fail_ctrl' is the path where none is.  'raw_mem' must be the raw
// memory state at the original 'ctrl'.
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                 DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                 TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(gc_state_iff);
  test_fail_ctrl = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread, old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state, old_ctrl);
  phase->register_new_node(gc_state_and, old_ctrl);
  phase->register_new_node(gc_state_cmp, old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  // Constant offset is loop-invariant; pin it at the root.
  phase->set_ctrl(gc_state_offset, phase->C->root());

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

// Emit a null check for 'val' if its type allows null.  On return, 'ctrl' is
// the (likely) non-null path and 'null_ctrl' the null path; if 'val' is
// provably non-null nothing is emitted and 'null_ctrl' is left untouched.
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl = new IfTrueNode(null_iff);
    null_ctrl = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff, loop, old_ctrl);
    phase->register_control(ctrl, loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp, old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

// Emit a collection-set membership test for 'val': index the in-cset fast
// test table by the value's region number and branch on the loaded byte.
// On return, 'ctrl' is the (unlikely) in-cset path and 'not_cset_ctrl' the
// path for objects outside the collection set.
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val = new CastP2XNode(old_ctrl, val);
  Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

  Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(cset_iff);
  not_cset_ctrl = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  // Constant table address is loop-invariant; pin it at the root.
  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val, old_ctrl);
  phase->register_new_node(cset_idx, old_ctrl);
  phase->register_new_node(cset_addr, old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr, old_ctrl);
  phase->register_new_node(cset_load, old_ctrl);
  phase->register_new_node(cset_cmp, old_ctrl);
phase->register_new_node(cset_bool, old_ctrl); 967 } 968 969 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, 970 DecoratorSet decorators, PhaseIdealLoop* phase) { 971 IdealLoopTree*loop = phase->get_loop(ctrl); 972 const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr(); 973 974 address calladdr = nullptr; 975 const char* name = nullptr; 976 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators); 977 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators); 978 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators); 979 bool is_native = ShenandoahBarrierSet::is_native_access(decorators); 980 bool is_narrow = UseCompressedOops && !is_native; 981 if (is_strong) { 982 if (is_narrow) { 983 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow); 984 name = "load_reference_barrier_strong_narrow"; 985 } else { 986 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong); 987 name = "load_reference_barrier_strong"; 988 } 989 } else if (is_weak) { 990 if (is_narrow) { 991 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow); 992 name = "load_reference_barrier_weak_narrow"; 993 } else { 994 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak); 995 name = "load_reference_barrier_weak"; 996 } 997 } else { 998 assert(is_phantom, "only remaining strength"); 999 if (is_narrow) { 1000 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow); 1001 name = "load_reference_barrier_phantom_narrow"; 1002 } else { 1003 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom); 1004 name = "load_reference_barrier_phantom"; 1005 } 1006 } 1007 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM); 1008 1009 
  // Wire the leaf call: no I/O, memory or frame state is threaded through
  // (tops), only the value and its load address are passed.
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, phase->C->top());
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  // Narrow the returned oop back to the original value's type.
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

// After a barrier has been expanded into the Region 'region', re-point every
// node that was control dependent on the barrier's old control so it hangs
// below 'region' instead. Nodes created by the expansion itself (idx >= last)
// and the input raw memory chain are left alone.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  // Transitively collect the inputs of the raw memory state that also sit at
  // 'ctrl': they must remain above the barrier as well.
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        // Edge replacement shifts the out array; re-examine this slot.
        --i, imax -= nb;
      }
    }
  }
}

// Build (recursively) Phi nodes that select between the original node 'n'
// (reached through the call's fall-through catch projection) and its clone
// 'n_clone' (reached through the catch-all projection) at every Region found
// while walking the idom chain from 'c' up to 'ctrl'.
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = nullptr;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != nullptr, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

// Main expansion driver: pins IU (SATB) barriers and load-reference barriers
// at suitable control points, then expands each into explicit IR (GC-state
// test, cset test, runtime calls) and removes the barrier nodes.
void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();

  Unique_Node_List uses;
  for (int i = 0; i < state->iu_barriers_count(); i++) {
    Node* barrier = state->iu_barrier(i);
    Node* ctrl = phase->get_ctrl(barrier);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* head = loop->head();
    if (head->is_OuterStripMinedLoop()) {
      // Expanding a barrier here will break loop strip mining
      // verification. Transform the loop so the loop nest doesn't
      // appear as strip mined.
      OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
      hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
    }
  }

  Node_Stack stack(0);
  Node_List clones;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);

    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    CallStaticJavaNode* unc = nullptr;
    Node* unc_ctrl = nullptr;
    Node* uncasted_val = val;

    // If the barrier's value feeds a CastPP that hangs off the non-null
    // projection of an uncommon-trap null check on the same value, redirect
    // the null check to test the barrier's result instead and pin the
    // barrier above the check.
    for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
      Node* u = lrb->fast_out(i);
      if (u->Opcode() == Op_CastPP &&
          u->in(0) != nullptr &&
          phase->is_dominator(u->in(0), ctrl)) {
        const Type* u_t = phase->igvn().type(u);

        // Match the exact shape: IfTrue of an uncommon-trap If whose
        // condition is CmpP(val, null) with BoolTest::ne.
        if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
            u->in(0)->Opcode() == Op_IfTrue &&
            u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
            u->in(0)->in(0)->is_If() &&
            u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
            u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
            u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
            u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
            u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
          IdealLoopTree* loop = phase->get_loop(ctrl);
          IdealLoopTree* unc_loop = phase->get_loop(u->in(0));

          if (!unc_loop->is_member(loop)) {
            continue;
          }

          Node* branch = no_branches(ctrl, u->in(0), false, phase);
          assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
          if (branch == NodeSentinel) {
            continue;
          }

          Node* iff = u->in(0)->in(0);
          Node* bol = iff->in(1)->clone();
          Node* cmp = bol->in(1)->clone();
          cmp->set_req(1, lrb);
          bol->set_req(1, cmp);
          phase->igvn().replace_input_of(iff, 1, bol);
          phase->set_ctrl(lrb, iff->in(0));
          phase->register_new_node(cmp, iff->in(0));
          phase->register_new_node(bol, iff->in(0));
          break;
        }
      }
    }
    if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
      CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        // The rethrow call may have too many projections to be
        // properly handled here.
        // Given there's no reason for a barrier to depend on the call,
        // move it above the call: walk the barrier's input chain and hoist
        // every node whose control is dominated by the call up to the
        // call's own control input.
        stack.push(lrb, 0);
        do {
          Node* n = stack.node();
          uint idx = stack.index();
          if (idx < n->req()) {
            Node* in = n->in(idx);
            stack.set_index(idx+1);
            if (in != nullptr) {
              if (phase->has_ctrl(in)) {
                if (phase->is_dominator(call, phase->get_ctrl(in))) {
#ifdef ASSERT
                  for (uint i = 0; i < stack.size(); i++) {
                    assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
                  }
#endif
                  stack.push(in, 0);
                }
              } else {
                assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
              }
            }
          } else {
            // All inputs processed: re-pin above the call and pop.
            phase->set_ctrl(n, call->in(0));
            stack.pop();
          }
        } while(stack.size() > 0);
        continue;
      }
      // Barrier pinned on a Java call with exception edges: duplicate the
      // barrier (and the nodes between it and the catch projections) so one
      // copy lives on the fall-through path and the clone on the catch-all
      // path, inserting Phis where uses merge both paths.
      CallProjections projs;
      call->extract_projections(&projs, false, false);

#ifdef ASSERT
      VectorSet cloned;
#endif
      Node* lrb_clone = lrb->clone();
      phase->register_new_node(lrb_clone, projs.catchall_catchproj);
      phase->set_ctrl(lrb, projs.fallthrough_catchproj);

      stack.push(lrb, 0);
      clones.push(lrb_clone);

      do {
        assert(stack.size() == clones.size(), "");
        Node* n = stack.node();
#ifdef ASSERT
        if (n->is_Load()) {
          Node* mem = n->in(MemNode::Memory);
          for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
            Node* u = mem->fast_out(j);
            assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
          }
        }
#endif
        uint idx = stack.index();
        Node* n_clone = clones.at(clones.size()-1);
        if (idx < n->outcnt()) {
          Node* u = n->raw_out(idx);
          Node* c = phase->ctrl_or_self(u);
          if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
            // Use sits between the call and the catch: clone it too and
            // recurse into its uses.
            stack.set_index(idx+1);
            assert(!u->is_CFG(), "");
            stack.push(u, 0);
            assert(!cloned.test_set(u->_idx), "only one clone");
            Node* u_clone = u->clone();
            int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
            assert(nb > 0, "should have replaced some uses");
            phase->register_new_node(u_clone, projs.catchall_catchproj);
            clones.push(u_clone);
            phase->set_ctrl(u, projs.fallthrough_catchproj);
          } else {
            bool replaced = false;
            if (u->is_Phi()) {
              // For a Phi, decide per input path which copy it should see.
              for (uint k = 1; k < u->req(); k++) {
                if (u->in(k) == n) {
                  if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, n_clone);
                    replaced = true;
                  } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
                    replaced = true;
                  }
                }
              }
            } else {
              if (phase->is_dominator(projs.catchall_catchproj, c)) {
                // Use is only reachable through the exception path: use the
                // clone.
                phase->igvn().rehash_node_delayed(u);
                int nb = u->replace_edge(n, n_clone, &phase->igvn());
                assert(nb > 0, "should have replaced some uses");
                replaced = true;
              } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
                if (u->is_If()) {
                  // Can't break If/Bool/Cmp chain
                  assert(n->is_Bool(), "unexpected If shape");
                  assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
                  assert(n_clone->is_Bool(), "unexpected clone");
                  assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
                  Node* bol_clone = n->clone();
                  Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
                  bol_clone->set_req(1, cmp_clone);

                  Node* nn = stack.node_at(stack.size()-3);
                  Node* nn_clone = clones.at(clones.size()-3);
                  assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");

                  int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
                                                   &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");

                  phase->register_new_node(bol_clone, u->in(0));
                  phase->register_new_node(cmp_clone, u->in(0));

                  phase->igvn().replace_input_of(u, 1, bol_clone);

                } else {
                  // Use merges both paths: feed it a Phi selecting the right
                  // copy.
                  phase->igvn().rehash_node_delayed(u);
                  int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");
                }
                replaced = true;
              }
            }
            if (!replaced) {
              stack.set_index(idx+1);
            }
          }
        } else {
          stack.pop();
          clones.pop();
        }
      } while (stack.size() > 0);
      assert(stack.size() == 0 && clones.size() == 0, "");
    }
  }

  // Barriers may have been re-pinned above; check again for strip-mined
  // loop heads before expansion.
  for (int i = 0; i < state->load_reference_barriers_count(); i++) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    Node* ctrl = phase->get_ctrl(lrb);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* head = loop->head();
    if (head->is_OuterStripMinedLoop()) {
      // Expanding a barrier here will break loop strip mining
      // verification. Transform the loop so the loop nest doesn't
      // appear as strip mined.
      OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
      hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
    }
  }

  // Expand load-reference-barriers
  MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
  Unique_Node_List uses_to_ignore;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    uint last = phase->C->unique();
    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    Node* orig_ctrl = ctrl;

    Node* raw_mem = fixer.find_mem(ctrl, lrb);
    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);

    IdealLoopTree *loop = phase->get_loop(ctrl);

    Node* heap_stable_ctrl = nullptr;
    Node* null_ctrl = nullptr;

    assert(val->bottom_type()->make_oopptr(), "need oop");
    assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");

    enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());

    // Stable path.
    int flags = ShenandoahHeap::HAS_FORWARDED;
    if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
      flags |= ShenandoahHeap::WEAK_ROOTS;
    }
    test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
    IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();

    // Heap stable case
    region->init_req(_heap_stable, heap_stable_ctrl);
    val_phi->init_req(_heap_stable, val);

    // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
    // even for non-cset objects to prevent resurrection of such objects.
    // Wires !in_cset(obj) to slot 2 of region and phis
    Node* not_cset_ctrl = nullptr;
    if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
      test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
    }
    if (not_cset_ctrl != nullptr) {
      region->init_req(_not_cset, not_cset_ctrl);
      val_phi->init_req(_not_cset, val);
    } else {
      region->del_req(_not_cset);
      val_phi->del_req(_not_cset);
    }

    // Resolve object when orig-value is in cset.
    // Make the unconditional resolve for fwdptr.

    // Call lrb-stub and wire up that path in slots 4
    Node* result_mem = nullptr;

    // With self-fixing, recover the address the value was loaded from so the
    // stub can heal the field; otherwise pass a null address.
    Node* addr;
    if (ShenandoahSelfFixing) {
      VectorSet visited;
      addr = get_load_addr(phase, visited, lrb);
    } else {
      addr = phase->igvn().zerocon(T_OBJECT);
    }
    if (addr->Opcode() == Op_AddP) {
      // Rebase the address on a control-pinned cast of the base so the
      // address computation cannot float above this point.
      Node* orig_base = addr->in(AddPNode::Base);
      Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
      phase->register_new_node(base, ctrl);
      if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
        // Field access
        addr = addr->clone();
        addr->set_req(AddPNode::Base, base);
        addr->set_req(AddPNode::Address, base);
        phase->register_new_node(addr, ctrl);
      } else {
        // Two-level AddP (array element access shape): rebase both levels.
        Node* addr2 = addr->in(AddPNode::Address);
        if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
              addr2->in(AddPNode::Base) == orig_base) {
          addr2 = addr2->clone();
          addr2->set_req(AddPNode::Base, base);
          addr2->set_req(AddPNode::Address, base);
          phase->register_new_node(addr2, ctrl);
          addr = addr->clone();
          addr->set_req(AddPNode::Base, base);
          addr->set_req(AddPNode::Address, addr2);
          phase->register_new_node(addr, ctrl);
        }
      }
    }
    call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
    region->init_req(_evac_path, ctrl);
    val_phi->init_req(_evac_path, val);

    phase->register_control(region, loop, heap_stable_iff);
    Node* out_val = val_phi;
    phase->register_new_node(val_phi, region);

    fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);

    ctrl = orig_ctrl;

    phase->igvn().replace_node(lrb, out_val);

    // Re-pin everything that hung off the barrier's old control below the
    // merge region.
    follow_barrier_uses(out_val, ctrl, uses, phase);

    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == ctrl, "bad control");
      assert(n != raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, ctrl, uses, phase);
    }
    fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
  }
  // Done expanding load-reference-barriers.
  assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");

  // Expand IU (SATB pre-write) barriers: marking test, null filter, then
  // either enqueue pre_val into the thread-local SATB buffer (fast path) or
  // call the write_ref_field_pre_entry runtime (buffer full / slow path).
  for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
    Node* barrier = state->iu_barrier(i);
    Node* pre_val = barrier->in(1);

    if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
      ShouldNotReachHere();
      continue;
    }

    Node* ctrl = phase->get_ctrl(barrier);

    // Hoist the barrier above a Java/runtime call it is pinned on; the
    // assert checks pre_val is available there.
    if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
      ctrl = ctrl->in(0)->in(0);
      phase->set_ctrl(barrier, ctrl);
    } else if (ctrl->is_CallRuntime()) {
      assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
      ctrl = ctrl->in(0);
      phase->set_ctrl(barrier, ctrl);
    }

    Node* init_ctrl = ctrl;
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* raw_mem = fixer.find_mem(ctrl, barrier);
    Node* init_raw_mem = raw_mem;
    Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
    Node* heap_stable_ctrl = nullptr;
    Node* null_ctrl = nullptr;
    uint last = phase->C->unique();

    enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
    Node* region = new RegionNode(PATH_LIMIT);
    Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
    Node* region2 = new RegionNode(PATH_LIMIT2);
    Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);

    // Stable path.
    test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
    region->init_req(_heap_stable, heap_stable_ctrl);
    phi->init_req(_heap_stable, raw_mem);

    // Null path
    Node* reg2_ctrl = nullptr;
    test_null(ctrl, pre_val, null_ctrl, phase);
    if (null_ctrl != nullptr) {
      reg2_ctrl = null_ctrl->in(0);
      region2->init_req(_null_path, null_ctrl);
      phi2->init_req(_null_path, raw_mem);
    } else {
      region2->del_req(_null_path);
      phi2->del_req(_null_path);
    }

    const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
    const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
    Node* thread = new ThreadLocalNode();
    phase->register_new_node(thread, ctrl);
    Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
    phase->register_new_node(buffer_adr, ctrl);
    Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
    phase->register_new_node(index_adr, ctrl);

    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
    const TypePtr* adr_type = TypeRawPtr::BOTTOM;
    Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
    phase->register_new_node(index, ctrl);
    // index == 0 means the SATB buffer is full.
    Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
    phase->register_new_node(index_cmp, ctrl);
    Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
    phase->register_new_node(index_test, ctrl);
    IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
    phase->register_control(queue_full_iff, loop, ctrl);
    Node* not_full = new IfTrueNode(queue_full_iff);
    phase->register_control(not_full, loop, queue_full_iff);
    Node* full = new IfFalseNode(queue_full_iff);
    phase->register_control(full, loop, queue_full_iff);

    ctrl = not_full;

    // Fast path: the buffer fills downward; store pre_val at
    // buffer[--index] and write the index back.
    Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
    phase->register_new_node(next_index, ctrl);

    Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
    phase->register_new_node(buffer, ctrl);
    Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
    phase->register_new_node(log_addr, ctrl);
    Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
    phase->register_new_node(log_store, ctrl);
    // update the index
    Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
    phase->register_new_node(index_update, ctrl);

    // Fast-path case
    region2->init_req(_fast_path, ctrl);
    phi2->init_req(_fast_path, index_update);

    ctrl = full;

    // Slow path: runtime call needs a full memory state, so build a
    // MergeMem around the bottom memory with the raw slice replaced.
    Node* base = find_bottom_mem(ctrl, phase);

    MergeMemNode* mm = MergeMemNode::make(base);
    mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
    phase->register_new_node(mm, ctrl);

    Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
    call->init_req(TypeFunc::Control, ctrl);
    call->init_req(TypeFunc::I_O, phase->C->top());
    call->init_req(TypeFunc::Memory, mm);
    call->init_req(TypeFunc::FramePtr, phase->C->top());
    call->init_req(TypeFunc::ReturnAdr, phase->C->top());
    call->init_req(TypeFunc::Parms, pre_val);
    call->init_req(TypeFunc::Parms+1, thread);
    phase->register_control(call, loop, ctrl);

    Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
    phase->register_control(ctrl_proj, loop, call);
    Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
    phase->register_new_node(mem_proj, call);

    // Slow-path case
    region2->init_req(_slow_path, ctrl_proj);
    phi2->init_req(_slow_path, mem_proj);

    phase->register_control(region2, loop, reg2_ctrl);
    phase->register_new_node(phi2, region2);

    region->init_req(_heap_unstable, region2);
    phi->init_req(_heap_unstable, phi2);

    phase->register_control(region, loop, heap_stable_ctrl->in(0));
    phase->register_new_node(phi, region);

    fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(phase->get_ctrl(n) == init_ctrl, "bad control");
      assert(n != init_raw_mem, "should leave input raw mem above the barrier");
      phase->set_ctrl(n, region);
      follow_barrier_uses(n, init_ctrl, uses, phase);
    }
    fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);

    phase->igvn().replace_node(barrier, pre_val);
  }
  assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");

}

// Walk the def chain of 'in' (the barrier's value) trying to recover the
// address it was originally loaded from (used by the self-fixing LRB stub).
// Returns nullptr for an already-visited node, the load's address when
// unambiguous, or zerocon(T_OBJECT) ("no usable address") otherwise.
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    return nullptr;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      // Casts/compressions don't change the underlying value: look through.
      return get_load_addr(phase, visited, in->in(1));
    case Op_LoadN:
    case Op_LoadP:
      // Found the load: its address input is what we want.
      return in->in(MemNode::Address);
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      // Those instructions would just have stored a different
      // value into the field. No use to attempt to fix it at this point.
      return phase->igvn().zerocon(T_OBJECT);
    case Op_CMoveP:
    case Op_CMoveN: {
      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
      // Handle unambiguous cases: single address reported on both branches.
      if (t != nullptr && f == nullptr) return t;
      if (t == nullptr && f != nullptr) return f;
      if (t != nullptr && t == f)       return t;
      // Ambiguity.
      return phase->igvn().zerocon(T_OBJECT);
    }
    case Op_Phi: {
      // All Phi inputs must agree on a single address.
      Node* addr = nullptr;
      for (uint i = 1; i < in->req(); i++) {
        Node* addr1 = get_load_addr(phase, visited, in->in(i));
        if (addr == nullptr) {
          addr = addr1;
        }
        if (addr != addr1) {
          return phase->igvn().zerocon(T_OBJECT);
        }
      }
      return addr;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
    case Op_ShenandoahIUBarrier:
      return get_load_addr(phase, visited, in->in(1));
    case Op_CallDynamicJava:
    case Op_CallLeaf:
    case Op_CallStaticJava:
    case Op_ConN:
    case Op_ConP:
    case Op_Parm:
    case Op_CreateEx:
      // Value does not come from a load: nothing to fix.
      return phase->igvn().zerocon(T_OBJECT);
    default:
#ifdef ASSERT
      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
#endif
      return phase->igvn().zerocon(T_OBJECT);
  }

}

// Hoist the gc-state test feeding 'iff' out of its loop: clone the
// load/and/cmp/bool chain at the loop entry (with a memory state valid
// there, found via dom_mem) and rewire the If to use the clone.
void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(iff);
  Node* loop_head = loop->_head;
  Node* entry_c = loop_head->in(LoopNode::EntryControl);

  // Expected shape: iff(Bool(Cmp(And(LoadB gc_state, flags), 0))).
  Node* bol = iff->in(1);
  Node* cmp = bol->in(1);
  Node* andi = cmp->in(1);
  Node* load = andi->in(1);

  assert(is_gc_state_load(load), "broken");
  if (!phase->is_dominator(load->in(0), entry_c)) {
    Node* mem_ctrl = nullptr;
    Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
    load = load->clone();
    load->set_req(MemNode::Memory, mem);
    load->set_req(0, entry_c);
    phase->register_new_node(load, entry_c);
    andi = andi->clone();
    andi->set_req(1, load);
    phase->register_new_node(andi, entry_c);
    cmp = cmp->clone();
    cmp->set_req(1, andi);
    phase->register_new_node(cmp, entry_c);
    bol = bol->clone();
    bol->set_req(1, cmp);
    phase->register_new_node(bol, entry_c);

    phase->igvn().replace_input_of(iff, 1, bol);
  }
}

// Return true when 'n' is a heap-stable test If whose Region input is
// entirely dominated by the projections of another heap-stable test If
// (the idom), i.e. the two tests are back-to-back and can be merged.
bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
  if (!n->is_If() || n->is_CountedLoopEnd()) {
    return false;
  }
  Node* region = n->in(0);

  if (!region->is_Region()) {
    return false;
  }
  Node* dom = phase->idom(region);
  if (!dom->is_If()) {
    return false;
  }

  if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
    return false;
  }

  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  // Every path into the region must come through one of the dominating
  // test's projections.
  for (uint i = 1; i < region->req(); i++) {
    if (phase->is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (phase->is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}

// Merge a heap-stable test with an identical dominating test by feeding the
// second If a Phi of constants over the dominating test's outcomes (split-if
// then folds it). (Definition continues beyond this chunk.)
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Both tests must observe the same gc_state load for the merge to
        // be sound; canonicalize on the dominating load.
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node* proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1756 for (uint i = 1; i < n_ctrl->req(); i++) { 1757 if (phase->is_dominator(proj_true, n_ctrl->in(i))) { 1758 bolphi->init_req(i, con_true); 1759 } else { 1760 assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if"); 1761 bolphi->init_req(i, con_false); 1762 } 1763 } 1764 phase->register_new_node(bolphi, n_ctrl); 1765 phase->igvn().replace_input_of(n, 1, bolphi); 1766 phase->do_split_if(n); 1767 } 1768 } 1769 } 1770 1771 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) { 1772 // Find first invariant test that doesn't exit the loop 1773 LoopNode *head = loop->_head->as_Loop(); 1774 IfNode* unswitch_iff = nullptr; 1775 Node* n = head->in(LoopNode::LoopBackControl); 1776 int loop_has_sfpts = -1; 1777 while (n != head) { 1778 Node* n_dom = phase->idom(n); 1779 if (n->is_Region()) { 1780 if (n_dom->is_If()) { 1781 IfNode* iff = n_dom->as_If(); 1782 if (iff->in(1)->is_Bool()) { 1783 BoolNode* bol = iff->in(1)->as_Bool(); 1784 if (bol->in(1)->is_Cmp()) { 1785 // If condition is invariant and not a loop exit, 1786 // then found reason to unswitch. 
1787 if (is_heap_stable_test(iff) && 1788 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) { 1789 assert(!loop->is_loop_exit(iff), "both branches should be in the loop"); 1790 if (loop_has_sfpts == -1) { 1791 for(uint i = 0; i < loop->_body.size(); i++) { 1792 Node *m = loop->_body[i]; 1793 if (m->is_SafePoint() && !m->is_CallLeaf()) { 1794 loop_has_sfpts = 1; 1795 break; 1796 } 1797 } 1798 if (loop_has_sfpts == -1) { 1799 loop_has_sfpts = 0; 1800 } 1801 } 1802 if (!loop_has_sfpts) { 1803 unswitch_iff = iff; 1804 } 1805 } 1806 } 1807 } 1808 } 1809 } 1810 n = n_dom; 1811 } 1812 return unswitch_iff; 1813 } 1814 1815 1816 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) { 1817 Node_List heap_stable_tests; 1818 stack.push(phase->C->start(), 0); 1819 do { 1820 Node* n = stack.node(); 1821 uint i = stack.index(); 1822 1823 if (i < n->outcnt()) { 1824 Node* u = n->raw_out(i); 1825 stack.set_index(i+1); 1826 if (!visited.test_set(u->_idx)) { 1827 stack.push(u, 0); 1828 } 1829 } else { 1830 stack.pop(); 1831 if (n->is_If() && is_heap_stable_test(n)) { 1832 heap_stable_tests.push(n); 1833 } 1834 } 1835 } while (stack.size() > 0); 1836 1837 for (uint i = 0; i < heap_stable_tests.size(); i++) { 1838 Node* n = heap_stable_tests.at(i); 1839 assert(is_heap_stable_test(n), "only evacuation test"); 1840 merge_back_to_back_tests(n, phase); 1841 } 1842 1843 if (!phase->C->major_progress()) { 1844 VectorSet seen; 1845 for (uint i = 0; i < heap_stable_tests.size(); i++) { 1846 Node* n = heap_stable_tests.at(i); 1847 IdealLoopTree* loop = phase->get_loop(n); 1848 if (loop != phase->ltree_root() && 1849 loop->_child == nullptr && 1850 !loop->_irreducible) { 1851 Node* head = loop->_head; 1852 if (head->is_Loop() && 1853 (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) && 1854 !seen.test_set(head->_idx)) { 1855 IfNode* iff = 
find_unswitching_candidate(loop, phase); 1856 if (iff != nullptr) { 1857 Node* bol = iff->in(1); 1858 if (head->as_Loop()->is_strip_mined()) { 1859 head->as_Loop()->verify_strip_mined(0); 1860 } 1861 move_gc_state_test_out_of_loop(iff, phase); 1862 1863 AutoNodeBudget node_budget(phase); 1864 1865 if (loop->policy_unswitching(phase)) { 1866 if (head->as_Loop()->is_strip_mined()) { 1867 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop(); 1868 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase); 1869 } 1870 phase->do_unswitching(loop, old_new); 1871 } else { 1872 // Not proceeding with unswitching. Move load back in 1873 // the loop. 1874 phase->igvn().replace_input_of(iff, 1, bol); 1875 } 1876 } 1877 } 1878 } 1879 } 1880 } 1881 } 1882 1883 ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) { 1884 ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this); 1885 } 1886 1887 const Type* ShenandoahIUBarrierNode::bottom_type() const { 1888 if (in(1) == nullptr || in(1)->is_top()) { 1889 return Type::TOP; 1890 } 1891 const Type* t = in(1)->bottom_type(); 1892 if (t == TypePtr::NULL_PTR) { 1893 return t; 1894 } 1895 return t->is_oopptr(); 1896 } 1897 1898 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const { 1899 if (in(1) == nullptr) { 1900 return Type::TOP; 1901 } 1902 const Type* t = phase->type(in(1)); 1903 if (t == Type::TOP) { 1904 return Type::TOP; 1905 } 1906 if (t == TypePtr::NULL_PTR) { 1907 return t; 1908 } 1909 return t->is_oopptr(); 1910 } 1911 1912 int ShenandoahIUBarrierNode::needed(Node* n) { 1913 if (n == nullptr || 1914 n->is_Allocate() || 1915 n->Opcode() == Op_ShenandoahIUBarrier || 1916 n->bottom_type() == TypePtr::NULL_PTR || 1917 (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) { 1918 return NotNeeded; 1919 } 1920 if (n->is_Phi() || 1921 n->is_CMove()) { 1922 return MaybeNeeded; 1923 } 1924 return Needed; 1925 } 1926 1927 
// Skip through value-preserving wrappers (constraint casts, DecodeN/EncodeP,
// projections) to the node that actually produces the value. Stops early on
// nullptr, the null constant, or a constant oop — those answer needed()
// directly.
Node* ShenandoahIUBarrierNode::next(Node* n) {
  for (;;) {
    if (n == nullptr) {
      return n;
    } else if (n->bottom_type() == TypePtr::NULL_PTR) {
      return n;
    } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
      return n;
    } else if (n->is_ConstraintCast() ||
               n->Opcode() == Op_DecodeN ||
               n->Opcode() == Op_EncodeP) {
      n = n->in(1);
    } else if (n->is_Proj()) {
      n = n->in(0);
    } else {
      return n;
    }
  }
  ShouldNotReachHere();
  return nullptr;
}

// Try to fold this barrier away. If the input provably never needs a barrier,
// return the bare input. For Phi/CMove inputs, breadth-first walk all their
// inputs (during IGVN only; otherwise defer by recording for IGVN): if any
// reachable value is Needed the barrier stays, if the whole worklist drains
// as NotNeeded the barrier is removed.
Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
  PhaseIterGVN* igvn = phase->is_IterGVN();

  Node* n = next(in(1));

  int cont = needed(n);

  if (cont == NotNeeded) {
    return in(1);
  } else if (cont == MaybeNeeded) {
    if (igvn == nullptr) {
      // Not in IGVN yet: can't do the (destructive-free) worklist walk here;
      // retry once IGVN runs.
      phase->record_for_igvn(this);
      return this;
    } else {
      ResourceMark rm;
      Unique_Node_List wq;
      uint wq_i = 0;

      for (;;) {
        // Expand the current MaybeNeeded node (Phi or CMove) into the queue.
        if (n->is_Phi()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != nullptr) {
              wq.push(m);
            }
          }
        } else {
          assert(n->is_CMove(), "nothing else here");
          Node* m = n->in(CMoveNode::IfFalse);
          wq.push(m);
          m = n->in(CMoveNode::IfTrue);
          wq.push(m);
        }
        Node* orig_n = nullptr;
        do {
          if (wq_i >= wq.size()) {
            // Queue drained without finding a Needed value: barrier not needed.
            return in(1);
          }
          n = wq.at(wq_i);
          wq_i++;
          orig_n = n;
          n = next(n);
          cont = needed(n);
          if (cont == Needed) {
            return this;
          }
          // Loop again unless n is a fresh MaybeNeeded node to expand
          // (skip nodes already queued to avoid cycling through phis).
        } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
      }
    }
  }

  return this;
}

#ifdef ASSERT
// Debug-only: does the root have an input path Halt <- Proj <- NeverBranch,
// i.e. does the graph contain an infinite loop exit?
static bool has_never_branch(Node* root) {
  for (uint i = 1; i < root->req(); i++) {
    Node* in = root->in(i);
    if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
      return true;
    }
  }
  return false;
}
#endif

// Build _memory_nodes: for every CFG node, the memory state (for _alias)
// that is live at it. First a DFS over the raw memory graph records the
// memory node at its control; then a fixed-point RPO pass propagates states
// forward, synthesizing new memory Phis at regions where predecessors
// disagree, and finally stale phis are replaced/destroyed.
void MemoryGraphFixer::collect_memory_nodes() {
  Node_Stack stack(0);
  VectorSet visited;
  Node_List regions;

  // Walk the raw memory graph and create a mapping from CFG node to
  // memory node. Exclude phis for now.
  stack.push(_phase->C->root(), 1);
  do {
    Node* n = stack.node();
    int opc = n->Opcode();
    uint i = stack.index();
    if (i < n->req()) {
      Node* mem = nullptr;
      if (opc == Op_Root) {
        // Find the memory state flowing into each root input (Return,
        // Rethrow, or Halt via a call / catch / infinite loop).
        Node* in = n->in(i);
        int in_opc = in->Opcode();
        if (in_opc == Op_Return || in_opc == Op_Rethrow) {
          mem = in->in(TypeFunc::Memory);
        } else if (in_opc == Op_Halt) {
          if (in->in(0)->is_Region()) {
            Node* r = in->in(0);
            for (uint j = 1; j < r->req(); j++) {
              assert(!r->in(j)->is_NeverBranch(), "");
            }
          } else {
            Node* proj = in->in(0);
            assert(proj->is_Proj(), "");
            Node* in = proj->in(0);
            assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
            if (in->is_CallStaticJava()) {
              mem = in->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_Catch) {
              Node* call = in->in(0)->in(0);
              assert(call->is_Call(), "");
              mem = call->in(TypeFunc::Memory);
            } else if (in->is_NeverBranch()) {
              mem = collect_memory_for_infinite_loop(in);
            }
          }
        } else {
#ifdef ASSERT
          n->dump();
          in->dump();
#endif
          ShouldNotReachHere();
        }
      } else {
        assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
        assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
        mem = n->in(i);
      }
      i++;
      stack.set_index(i);
      if (mem == nullptr) {
        continue;
      }
      // Chase the memory input chain for this slice, pushing multi-input
      // nodes (phis, stores, projections' parents) for later traversal.
      for (;;) {
        if (visited.test_set(mem->_idx) || mem->is_Start()) {
          break;
        }
        if (mem->is_Phi()) {
          stack.push(mem, 2);
          mem = mem->in(1);
        } else if (mem->is_Proj()) {
          stack.push(mem, mem->req());
          mem = mem->in(0);
        } else if (mem->is_SafePoint() || mem->is_MemBar()) {
          mem = mem->in(TypeFunc::Memory);
        } else if (mem->is_MergeMem()) {
          MergeMemNode* mm = mem->as_MergeMem();
          mem = mm->memory_at(_alias);
        } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
          assert(_alias == Compile::AliasIdxRaw, "");
          stack.push(mem, mem->req());
          mem = mem->in(MemNode::Memory);
        } else {
#ifdef ASSERT
          mem->dump();
#endif
          ShouldNotReachHere();
        }
      }
    } else {
      // Done with this node's inputs: record non-phi memory nodes at
      // their control.
      if (n->is_Phi()) {
        // Nothing
      } else if (!n->is_Root()) {
        Node* c = get_ctrl(n);
        _memory_nodes.map(c->_idx, n);
      }
      stack.pop();
    }
  } while(stack.is_nonempty());

  // Iterate over CFG nodes in rpo and propagate memory state to
  // compute memory state at regions, creating new phis if needed.
  Node_List rpo_list;
  visited.clear();
  _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
  Node* root = rpo_list.pop();
  assert(root == _phase->C->root(), "");

  const bool trace = false;
#ifdef ASSERT
  if (trace) {
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);
      if (_memory_nodes[c->_idx] != nullptr) {
        tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
      }
    }
  }
#endif
  uint last = _phase->C->unique();

#ifdef ASSERT
  uint16_t max_depth = 0;
  for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    max_depth = MAX2(max_depth, lpt->_nest);
  }
#endif

  bool progress = true;
  int iteration = 0;
  Node_List dead_phis;
  while (progress) {
    progress = false;
    iteration++;
    // Fixed point should be reached within a couple of passes per loop
    // nesting level, barring irreducible/infinite loops.
    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
    if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }

    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);

      Node* prev_mem = _memory_nodes[c->_idx];
      if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
        Node* prev_region = regions[c->_idx];
        // Determine whether all predecessors agree on one memory state;
        // NodeSentinel marks "conflicting states, a phi is required".
        Node* unique = nullptr;
        for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
          Node* m = _memory_nodes[c->in(j)->_idx];
          assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
          if (m != nullptr) {
            if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
              assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
              // continue
            } else if (unique == nullptr) {
              unique = m;
            } else if (m == unique) {
              // continue
            } else {
              unique = NodeSentinel;
            }
          }
        }
        assert(unique != nullptr, "empty phi???");
        if (unique != NodeSentinel) {
          // Single incoming state: any previously created phi here is dead.
          if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
            dead_phis.push(prev_region);
          }
          regions.map(c->_idx, unique);
        } else {
          Node* phi = nullptr;
          if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
            // Reuse the phi created by an earlier iteration, updating inputs.
            phi = prev_region;
            for (uint k = 1; k < c->req(); k++) {
              Node* m = _memory_nodes[c->in(k)->_idx];
              assert(m != nullptr, "expect memory state");
              phi->set_req(k, m);
            }
          } else {
            // Look for a pre-existing phi at this region that already has
            // the right inputs before creating a new one.
            for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
              Node* u = c->fast_out(j);
              if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
                  (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
                phi = u;
                for (uint k = 1; k < c->req() && phi != nullptr; k++) {
                  Node* m = _memory_nodes[c->in(k)->_idx];
                  assert(m != nullptr, "expect memory state");
                  if (u->in(k) != m) {
                    phi = NodeSentinel;
                  }
                }
              }
            }
            if (phi == NodeSentinel) {
              phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
              for (uint k = 1; k < c->req(); k++) {
                Node* m = _memory_nodes[c->in(k)->_idx];
                assert(m != nullptr, "expect memory state");
                phi->init_req(k, m);
              }
            }
          }
          if (phi != nullptr) {
            regions.map(c->_idx, phi);
          } else {
            assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
          }
        }
        Node* current_region = regions[c->_idx];
        if (current_region != prev_region) {
          progress = true;
          if (prev_region == prev_mem) {
            _memory_nodes.map(c->_idx, current_region);
          }
        }
      } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
        // Non-region (or skipped) CFG node: inherit the idom's memory state.
        Node* m = _memory_nodes[_phase->idom(c)->_idx];
        assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
        if (m != prev_mem) {
          _memory_nodes.map(c->_idx, m);
          progress = true;
        }
      }
#ifdef ASSERT
      if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
#endif
    }
  }

  // Replace existing phi with computed memory state for that region
  // if different (could be a new phi or a dominating memory node if
  // that phi was found to be useless).
  while (dead_phis.size() > 0) {
    Node* n = dead_phis.pop();
    n->replace_by(_phase->C->top());
    n->destruct(&_phase->igvn());
  }
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
      if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
        _phase->register_new_node(n, c);
      }
    }
  }
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
            u != n) {
          assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
          if (u->adr_type() == TypePtr::BOTTOM) {
            fix_memory_uses(u, n, n, c);
          } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
            _phase->lazy_replace(u, n);
            --i; --imax;
          }
        }
      }
    }
  }
}

// Find a memory state (for _alias) usable for an infinite loop terminated by
// a NeverBranch 'in'. Prefer a memory phi already hanging off the loop head
// (exact-alias phi over a BOTTOM one); otherwise walk the loop's control
// backwards to a safepoint and take its memory input.
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
  Node* mem = nullptr;
  Node* head = in->in(0);
  assert(head->is_Region(), "unexpected infinite loop graph shape");

  Node* phi_mem = nullptr;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* u = head->fast_out(j);
    if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
      if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
        phi_mem = u;
      } else if (u->adr_type() == TypePtr::BOTTOM) {
        assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
        // A BOTTOM phi is only a fallback if no exact-alias phi exists.
        if (phi_mem == nullptr) {
          phi_mem = u;
        }
      }
    }
  }
  if (phi_mem == nullptr) {
    // No memory phi at the loop head: search the loop's control graph
    // backwards (stack of regions) for a safepoint carrying the state.
    ResourceMark rm;
    Node_Stack stack(0);
    stack.push(head, 1);
    do {
      Node* n = stack.node();
      uint i = stack.index();
      if (i >= n->req()) {
        stack.pop();
      } else {
        stack.set_index(i + 1);
        Node* c = n->in(i);
        assert(c != head, "should have found a safepoint on the way");
        // Only explore paths inside the loop (dominated by the head),
        // except for the head's own inputs (stack.size() == 1).
        if (stack.size() != 1 || _phase->is_dominator(head, c)) {
          for (;;) {
            if (c->is_Region()) {
              stack.push(c, 1);
              break;
            } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
              Node* m = c->in(TypeFunc::Memory);
              if (m->is_MergeMem()) {
                m = m->as_MergeMem()->memory_at(_alias);
              }
              assert(mem == nullptr || mem == m, "several memory states");
              mem = m;
              break;
            } else {
              assert(c != c->in(0), "");
              c = c->in(0);
            }
          }
        }
      }
    } while (stack.size() > 0);
    assert(mem != nullptr, "should have found safepoint");
  } else {
    mem = phi_mem;
  }
  return mem;
}

// Control for node 'n', with a refinement for call memory projections:
// attribute them to the matching catch projection (fall-through vs.
// catch-all) when the call has both.
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
  Node* c = _phase->get_ctrl(n);
  if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
    assert(c == n->in(0), "");
    CallNode* call = c->as_Call();
    CallProjections projs;
    call->extract_projections(&projs, true, false);
    if (projs.catchall_memproj != nullptr) {
      if (projs.fallthrough_memproj == n) {
        c = projs.fallthrough_catchproj;
      } else {
        assert(projs.catchall_memproj == n, "");
        c = projs.catchall_catchproj;
      }
    }
  }
  return c;
}

// Like get_ctrl() for data nodes; a CFG node is its own control.
Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
  if (_phase->has_ctrl(n))
    return get_ctrl(n);
  else {
    assert (n->is_CFG(), "must be a CFG node");
    return n;
  }
}

// A memory state 'm' is valid at control 'c' when it exists and is
// attributed exactly to 'c'.
bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
  return m != nullptr && get_ctrl(m) == c;
}

// Find the memory state (for _alias) valid at 'ctrl' for use by node 'n'
// (nullptr to find the state at the end of the block): walk up the idom
// chain until a valid state is found, then within the block step past
// memory nodes that come after 'n'.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
  assert(!ctrl->is_Call() || ctrl == n, "projection expected");
#ifdef ASSERT
  if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
      (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
    CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
    int mems = 0;
    for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
      Node* u = call->fast_out(i);
      if (u->bottom_type() == Type::MEMORY) {
        mems++;
      }
    }
    assert(mems <= 1, "No node right after call if multiple mem projections");
  }
#endif
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != nullptr && mem_is_valid(mem, c)) {
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}

// Does 'region' already have a memory phi for this fixer's alias?
bool MemoryGraphFixer::has_mem_phi(Node* region) const {
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* use = region->fast_out(i);
    if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
        (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
      return true;
    }
  }
  return false;
}

// Splice a new memory state 'new_mem' (produced at 'new_ctrl') into the
// memory graph for this alias. If 'mem_for_ctrl' differs from 'mem', the
// new state is inserted into the existing def chain; otherwise the CFG is
// flooded from 'new_ctrl' creating phis at merge points, and then all uses
// of 'mem' (loads first, to preserve anti-dependencies) are rewired.
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // Walk from mem_for_ctrl back to mem; 'prev' ends up as the first
    // memory node after 'mem' on that chain and gets its input redirected
    // to new_mem.
    Node* old = mem_for_ctrl;
    Node* prev = nullptr;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != nullptr, "");
    if (new_ctrl != ctrl) {
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    uint input = (uint)MemNode::Memory;
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // Flood the CFG forward from new_ctrl: at each region reached that
    // lacks a memory phi for this alias, create one (unless new_ctrl
    // dominates the region, in which case the new state flows through).
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                create_phi = false;
              }
              if (create_phi) {
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  // A valid memory node already lives at this region: find the
                  // first memory def in the block and make it consume the phi.
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
                    Node* next = nullptr;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else {
                      assert(m->is_Mem() || m->is_LoadStore(), "");
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore(), "");
                  uint input = (uint)MemNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
            uses.push(u);
          }
        }
      }
    }
    // Now that all phis exist, fill in their inputs from the memory state
    // at each predecessor.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), nullptr);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  uint last = _phase->C->unique();
  MergeMemNode* mm = nullptr;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  // Process loads first to not miss an anti-dependency: if the memory
  // edge of a store is updated before a load is processed then an
  // anti-dependency may be missed.
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
      Node* m = find_mem(_phase->get_ctrl(u), u);
      if (m != mem) {
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, MemNode::Memory, m);
        --i;
      }
    }
  }
  // Then rewire every remaining pre-existing (u->_idx < last) use of 'mem'.
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i;
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          MergeMemNode* newmm = nullptr;
          // A MergeMem may be shared by uses needing different states:
          // clone it per conflicting use.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              assert(uu->adr_type() == TypePtr::BOTTOM, "");
              Node* region = uu->in(0);
              int nb = 0;
              for (uint k = 1; k < uu->req(); k++) {
                if (uu->in(k) == u) {
                  Node* m = find_mem(region->in(k), nullptr);
                  if (m != mem) {
                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
              }
              if (nb > 0) {
                --j;
              }
            } else {
              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
              if (m != mem) {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
          Node* region = u->in(0);
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem) {
              Node* m = find_mem(region->in(j), nullptr);
              Node* nnew = m;
              if (m != mem) {
                if (u->adr_type() == TypePtr::BOTTOM) {
                  // BOTTOM phi wants a full memory state: wrap the per-alias
                  // state in a MergeMem.
                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
                  nnew = mm;
                }
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
                _phase->igvn().replace_input_of(u, j, nnew);
                replaced = true;
              }
            }
          }
          if (replaced) {
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        // Wide-memory consumers (Return/Rethrow/SafePoint/uncommon traps,
        // leaf calls): hand them a MergeMem carrying the updated slice.
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
          --i;
        }
      } else if (u->adr_type() != TypePtr::BOTTOM &&
                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        assert(m != mem, "");
        // u is on the wrong slice...
        assert(u->is_ClearArray(), "");
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
        --i;
      }
    }
  }
#ifdef ASSERT
  assert(new_mem->outcnt() > 0, "");
  for (int i = 0; i < phis.length(); i++) {
    Node* n = phis.at(i);
    assert(n->outcnt() > 0, "new phi must have uses now");
  }
#endif
}

// Record the memory states for a control split without rewiring anything.
void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
  if (mem_for_ctrl != mem && new_ctrl != ctrl) {
    _memory_nodes.map(ctrl->_idx, mem);
    _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
  }
}

// Build a fresh MergeMem based on 'mem' with this fixer's alias slice
// replaced by 'rep_proj', registered at 'rep_ctrl'.
MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
  MergeMemNode* mm = MergeMemNode::make(mem);
  mm->set_memory_at(_alias, rep_proj);
  _phase->register_new_node(mm, rep_ctrl);
  return mm;
}

// Produce a MergeMem equal to 'u' but with the _alias slice set to
// 'rep_proj'. If 'u' has a single use it is updated in place (returning u),
// otherwise a clone is built edge by edge — cloning then editing would
// add/remove an edge and invalidate the caller's DUIterator 'i'.
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = nullptr;
  MergeMemNode* u_mm = u->as_MergeMem();
  Node* c = _phase->get_ctrl(u);
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    if ((uint)_alias >= u->req()) {
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}

// Should fix_memory_uses() touch this memory phi? Exact-alias phis: yes.
// BOTTOM phis: only when the region has no dedicated phi for this alias
// (that one would be handled instead).
bool MemoryGraphFixer::should_process_phi(Node* phi) const {
  if (phi->adr_type() == TypePtr::BOTTOM) {
    Node* region = phi->in(0);
    for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
      Node* uu = region->fast_out(j);
      if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
        return false;
      }
    }
    return true;
  }
  return _phase->C->get_alias_index(phi->adr_type()) == _alias;
}

// Redirect pre-existing uses of memory state 'mem' that are dominated by
// 'rep_ctrl' to the replacement state 'rep_proj' (cloning shared MergeMems
// as needed). 'replacement' itself is skipped.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase->C->unique();
  MergeMemNode* mm = nullptr;
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = nullptr;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                for (uint k = 1; k < uu->req(); k++) {
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == nullptr) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == nullptr) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // BOTTOM phi needs a full memory state: lazily build one
                // MergeMem carrying the replacement slice.
                if (mm == nullptr) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
_phase->igvn().replace_input_of(u, j, nnew); 2813 replaced = true; 2814 } 2815 } 2816 if (replaced) { 2817 --i; 2818 } 2819 2820 } 2821 } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) || 2822 u->adr_type() == nullptr) { 2823 assert(u->adr_type() != nullptr || 2824 u->Opcode() == Op_Rethrow || 2825 u->Opcode() == Op_Return || 2826 u->Opcode() == Op_SafePoint || 2827 (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || 2828 (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || 2829 u->Opcode() == Op_CallLeaf, "%s", u->Name()); 2830 if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { 2831 if (mm == nullptr) { 2832 mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); 2833 } 2834 _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); 2835 --i; 2836 } 2837 } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { 2838 if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { 2839 _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj); 2840 --i; 2841 } 2842 } 2843 } 2844 } 2845 } 2846 2847 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators) 2848 : Node(ctrl, obj), _decorators(decorators) { 2849 ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this); 2850 } 2851 2852 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const { 2853 return _decorators; 2854 } 2855 2856 uint ShenandoahLoadReferenceBarrierNode::size_of() const { 2857 return sizeof(*this); 2858 } 2859 2860 static DecoratorSet mask_decorators(DecoratorSet decorators) { 2861 return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE); 2862 } 2863 2864 uint ShenandoahLoadReferenceBarrierNode::hash() const { 2865 uint hash = Node::hash(); 
2866 hash += mask_decorators(_decorators); 2867 return hash; 2868 } 2869 2870 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const { 2871 return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier && 2872 mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators); 2873 } 2874 2875 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const { 2876 if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) { 2877 return Type::TOP; 2878 } 2879 const Type* t = in(ValueIn)->bottom_type(); 2880 if (t == TypePtr::NULL_PTR) { 2881 return t; 2882 } 2883 2884 if (ShenandoahBarrierSet::is_strong_access(decorators())) { 2885 return t; 2886 } 2887 2888 return t->meet(TypePtr::NULL_PTR); 2889 } 2890 2891 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const { 2892 // Either input is TOP ==> the result is TOP 2893 const Type *t2 = phase->type(in(ValueIn)); 2894 if( t2 == Type::TOP ) return Type::TOP; 2895 2896 if (t2 == TypePtr::NULL_PTR) { 2897 return t2; 2898 } 2899 2900 if (ShenandoahBarrierSet::is_strong_access(decorators())) { 2901 return t2; 2902 } 2903 2904 return t2->meet(TypePtr::NULL_PTR); 2905 } 2906 2907 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) { 2908 Node* value = in(ValueIn); 2909 if (!needs_barrier(phase, value)) { 2910 return value; 2911 } 2912 return this; 2913 } 2914 2915 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) { 2916 Unique_Node_List visited; 2917 return needs_barrier_impl(phase, n, visited); 2918 } 2919 2920 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) { 2921 if (n == nullptr) return false; 2922 if (visited.member(n)) { 2923 return false; // Been there. 
2924 } 2925 visited.push(n); 2926 2927 if (n->is_Allocate()) { 2928 // tty->print_cr("optimize barrier on alloc"); 2929 return false; 2930 } 2931 if (n->is_Call()) { 2932 // tty->print_cr("optimize barrier on call"); 2933 return false; 2934 } 2935 2936 const Type* type = phase->type(n); 2937 if (type == Type::TOP) { 2938 return false; 2939 } 2940 if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { 2941 // tty->print_cr("optimize barrier on null"); 2942 return false; 2943 } 2944 if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) { 2945 // tty->print_cr("optimize barrier on constant"); 2946 return false; 2947 } 2948 2949 switch (n->Opcode()) { 2950 case Op_AddP: 2951 return true; // TODO: Can refine? 2952 case Op_LoadP: 2953 case Op_ShenandoahCompareAndExchangeN: 2954 case Op_ShenandoahCompareAndExchangeP: 2955 case Op_CompareAndExchangeN: 2956 case Op_CompareAndExchangeP: 2957 case Op_GetAndSetN: 2958 case Op_GetAndSetP: 2959 return true; 2960 case Op_Phi: { 2961 for (uint i = 1; i < n->req(); i++) { 2962 if (needs_barrier_impl(phase, n->in(i), visited)) return true; 2963 } 2964 return false; 2965 } 2966 case Op_CheckCastPP: 2967 case Op_CastPP: 2968 return needs_barrier_impl(phase, n->in(1), visited); 2969 case Op_Proj: 2970 return needs_barrier_impl(phase, n->in(0), visited); 2971 case Op_ShenandoahLoadReferenceBarrier: 2972 // tty->print_cr("optimize barrier on barrier"); 2973 return false; 2974 case Op_Parm: 2975 // tty->print_cr("optimize barrier on input arg"); 2976 return false; 2977 case Op_DecodeN: 2978 case Op_EncodeP: 2979 return needs_barrier_impl(phase, n->in(1), visited); 2980 case Op_LoadN: 2981 return true; 2982 case Op_CMoveN: 2983 case Op_CMoveP: 2984 return needs_barrier_impl(phase, n->in(2), visited) || 2985 needs_barrier_impl(phase, n->in(3), visited); 2986 case Op_ShenandoahIUBarrier: 2987 return needs_barrier_impl(phase, n->in(1), visited); 2988 case Op_CreateEx: 2989 return false; 2990 default: 2991 break; 2992 } 
2993 #ifdef ASSERT 2994 tty->print("need barrier on?: "); 2995 tty->print_cr("ins:"); 2996 n->dump(2); 2997 tty->print_cr("outs:"); 2998 n->dump(-2); 2999 ShouldNotReachHere(); 3000 #endif 3001 return true; 3002 }