1 /*
2 * Copyright (c) 2015, 2026, Red Hat, Inc. All rights reserved.
3 * Copyright (C) 2022, Tencent. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "classfile/javaClasses.hpp"
28 #include "code/aotCodeCache.hpp"
29 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
30 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
32 #include "gc/shenandoah/shenandoahForwarding.hpp"
33 #include "gc/shenandoah/shenandoahHeap.hpp"
34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
35 #include "gc/shenandoah/shenandoahRuntime.hpp"
36 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/block.hpp"
39 #include "opto/callnode.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46
// Expand all pending Shenandoah load-reference barriers into explicit IR.
// Runs a dedicated loop-opts pass (LoopOptsShenandoahExpand) followed by the
// post-loop-opts IGVN queue. Returns false if the compilation failed.
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if (state->load_reference_barriers_count() > 0) {
    // Barrier expansion runs after the regular loop optimizations are over,
    // so the flag must already be set; temporarily clear it so PhaseIdealLoop
    // accepts another pass.
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    // Process nodes that were queued for IGVN until after loop opts.
    C->process_for_post_loop_opts_igvn(igvn);
    if (C->failing()) return false;

    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}
62
63 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
64 if (!UseShenandoahGC) {
65 return false;
66 }
67 assert(iff->is_If(), "bad input");
68 if (iff->Opcode() != Op_If) {
69 return false;
70 }
71 Node* bol = iff->in(1);
72 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
73 return false;
74 }
75 Node* cmp = bol->in(1);
76 if (cmp->Opcode() != Op_CmpI) {
77 return false;
78 }
79 Node* in1 = cmp->in(1);
80 Node* in2 = cmp->in(2);
81 if (in2->find_int_con(-1) != 0) {
82 return false;
83 }
84 if (in1->Opcode() != Op_AndI) {
85 return false;
86 }
87 in2 = in1->in(2);
88 if (in2->find_int_con(-1) != mask) {
89 return false;
90 }
91 in1 = in1->in(1);
92
93 return is_gc_state_load(in1);
94 }
95
// A heap-stability test is a gc-state test of the HAS_FORWARDED bit:
// the heap is unstable while forwarded objects may exist.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}
99
100 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
101 if (!UseShenandoahGC) {
102 return false;
103 }
104 if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
105 return false;
106 }
107 Node* addp = n->in(MemNode::Address);
108 if (!addp->is_AddP()) {
109 return false;
110 }
111 Node* base = addp->in(AddPNode::Address);
112 Node* off = addp->in(AddPNode::Offset);
113 if (base->Opcode() != Op_ThreadLocal) {
114 return false;
115 }
116 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
117 return false;
118 }
119 return true;
120 }
121
122 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
123 assert(phase->is_dominator(stop, start), "bad inputs");
124 ResourceMark rm;
125 Unique_Node_List wq;
126 wq.push(start);
127 for (uint next = 0; next < wq.size(); next++) {
128 Node *m = wq.at(next);
129 if (m == stop) {
130 continue;
131 }
132 if (m->is_SafePoint() && !m->is_CallLeaf()) {
133 return true;
134 }
135 if (m->is_Region()) {
136 for (uint i = 1; i < m->req(); i++) {
137 wq.push(m->in(i));
138 }
139 } else {
140 wq.push(m->in(0));
141 }
142 }
143 return false;
144 }
145
146 #ifdef ASSERT
// Check that the oop value 'in' is properly covered by barriers for use 't'.
// Walks through value-preserving nodes (casts, AddP, Encode/Decode) and
// explores all inputs of phis/cmoves using the explicit 'phis' stack, with
// 'visited' guarding against revisiting. Barriers encountered are recorded in
// 'barriers_used'. Returns false if some path reaches a value that should
// have a barrier but doesn't (or a barrier where ShenandoahOopStore forbids one).
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      // Null never needs a barrier.
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      // Not an oop: nothing to verify.
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        // Look through casts to the underlying value.
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        // A value that is stored into an oop field must not be an LRB result
        // for ShenandoahOopStore uses; otherwise the barrier satisfies the check.
        if (t == ShenandoahOopStore) {
          return false;
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        // Freshly allocated objects are always in to-space: no barrier needed.
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          // First visit: remember position and explore the first data input.
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          // Explore the false input now, queue the true input on the stack.
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        // Compressed-oop conversions are value-preserving.
        in = in->in(1);
        continue;
      } else {
        // Unknown producer of an oop: verification fails.
        return false;
      }
    }
    // Resume exploration from the next pending phi/cmove input, if any.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        // All inputs of this phi explored.
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}
239
// Dump the offending node(s) with some surrounding context (10 levels of
// outputs) and abort the VM with a fatal error carrying 'msg'.
void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != nullptr) {
    n1->dump(+10);
  }
  if (n2 != nullptr) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}
249
// Debug-only whole-graph verification: walk every node reachable from the
// root and check that every oop use which requires a barrier (loads, stores,
// compares, atomics, known runtime/intrinsic calls) is fed by a properly
// barriered value, as decided by verify_helper(). Known-safe patterns (mark
// word loads, Reference.get(), SATB queue stores, new allocations, ...) are
// whitelisted. Intrinsic calls and special nodes are checked against the
// tables below which name, per call/opcode, which arguments must be covered
// and how (load vs store). Any uncovered oop argument is a fatal error.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  // Optional stricter mode: also flag barriers that protect nothing.
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        // Metadata loads need no barrier.
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          // Mark word loads are explicitly allowed without a barrier.
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          // Reference.referent is handled specially by the GC.
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          // Raw store: check for the SATB pre-barrier queue-buffer store
          // pattern, which is GC-internal and needs no further barriers.
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          // Comparing against null/constant doesn't require barriers.
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          // New objects are to-space: comparison is stable without barriers.
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      // Atomic ops: both the stored value and the address must be covered.
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Per-stub table: which arguments of each known leaf call carry oops
      // and how they must be covered. pos == -1 terminates an entry's list.
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "array_partition_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone }, { -1, ShenandoahNone }, { -1, ShenandoahNone } },
        "arraysort_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { -1, ShenandoahNone }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "intpoly_montgomeryMult_P256",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "intpoly_assign",
        { { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        // Arraycopy stubs: first oop argument is the source, second the
        // destination; find the destination by scanning the signature.
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        // Array fill stubs write to their first argument.
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        // Look the call up in the table and check each listed argument;
        // then make sure every oop argument was actually covered by the table.
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown leaf call: it must not take any oop arguments at all.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Per-opcode table for the remaining nodes that take oop inputs;
      // same covered/uncovered checking scheme as the calls table above.
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Node not in the table: it must not take any oop inputs.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        // Debug info values should not keep barriers alive.
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    // Every recorded barrier should have been required by at least one use.
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
627 #endif
628
629 bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
630 Node* control) {
631 return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
632 phase->ctrl_or_self(maybe_load) == control;
633 }
634
635 void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
636 if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
637 return;
638 }
639 Node* mem = maybe_store->in(MemNode::Memory);
640 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
641 Node* u = mem->fast_out(i);
642 if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
643 wq.push(u);
644 }
645 }
646 }
647
648 void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
649 for (uint i = 0; i < n->req(); i++) {
650 Node* in = n->in(i);
651 if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
652 wq.push(in);
653 }
654 }
655 }
656
657 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
658 // That both nodes have the same control is not sufficient to prove
659 // domination, verify that there's no path from d to n
660 ResourceMark rm;
661 Unique_Node_List wq;
662 wq.push(d);
663 for (uint next = 0; next < wq.size(); next++) {
664 Node *m = wq.at(next);
665 if (m == n) {
666 return false;
667 }
668 if (m->is_Phi() && m->in(0)->is_Loop()) {
669 assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
670 } else {
671 // Take anti-dependencies into account
672 maybe_push_anti_dependent_loads(phase, m, c, wq);
673 push_data_inputs_at_control(phase, m, c, wq);
674 }
675 }
676 return true;
677 }
678
679 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
680 if (d_c != n_c) {
681 return phase->is_dominator(d_c, n_c);
682 }
683 return is_dominator_same_ctrl(d_c, d, n, phase);
684 }
685
686 Node* next_mem(Node* mem, int alias) {
687 Node* res = nullptr;
688 if (mem->is_Proj()) {
689 res = mem->in(0);
690 } else if (mem->is_SafePoint() || mem->is_MemBar()) {
691 res = mem->in(TypeFunc::Memory);
692 } else if (mem->is_Phi()) {
693 res = mem->in(1);
694 } else if (mem->is_MergeMem()) {
695 res = mem->as_MergeMem()->memory_at(alias);
696 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
697 assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
698 res = mem->in(MemNode::Memory);
699 } else {
700 #ifdef ASSERT
701 mem->dump();
702 #endif
703 ShouldNotReachHere();
704 }
705 return res;
706 }
707
// Walk the idom chain from c up to dom and verify the path contains no
// unexpected control flow. Uncommon-trap projections are tolerated; at most
// one other If projection is tolerated when allow_one_proj is true (and is
// returned). Returns NodeSentinel if an unsupported branch is found, the
// single tolerated projection otherwise, or nullptr when the path is clean.
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect the CFG subgraph between the region and its idom ...
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      // ... and check no control escapes it other than via uncommon traps.
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj()) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern() != nullptr) {
          // Uncommon trap branch: harmless.
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          // Tolerate a single real If projection on the path.
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}
772
773 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
774 ResourceMark rm;
775 VectorSet wq;
776 wq.set(mem->_idx);
777 mem_ctrl = phase->ctrl_or_self(mem);
778 while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
779 mem = next_mem(mem, alias);
780 if (wq.test_set(mem->_idx)) {
781 return nullptr;
782 }
783 mem_ctrl = phase->ctrl_or_self(mem);
784 }
785 if (mem->is_MergeMem()) {
786 mem = mem->as_MergeMem()->memory_at(alias);
787 mem_ctrl = phase->ctrl_or_self(mem);
788 }
789 return mem;
790 }
791
// Find the bottom (all-slices, TypePtr::BOTTOM) memory state live at 'ctrl'
// by walking up the idom chain: a memory phi at a region, the appropriate
// memory projection of a call (picking the fallthrough or catchall path that
// dominates ctrl), or the memory projection of a safepoint/membar/start.
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      // Look for a bottom memory phi merging at this region.
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              // Two memory projections: pick the one on the path that
              // dominates the barrier's control.
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        // Safepoint/membar/start: at most one bottom memory projection.
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == nullptr, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}
847
848 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
849 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
850 Node* u = n->fast_out(i);
851 if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
852 uses.push(u);
853 }
854 }
855 }
856
857 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
858 OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
859 Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
860 phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
861 Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
862 phase->register_control(new_le, phase->get_loop(le), le->in(0));
863 phase->replace_node_and_forward_ctrl(outer, new_outer);
864 phase->replace_node_and_forward_ctrl(le, new_le);
865 inner->clear_strip_mined();
866 }
867
// Emit the "is a GC in progress?" test: load the thread-local gc-state byte
// and test it against the given flag bits. On return, ctrl is advanced to
// the projection taken when some of the flags are set (slow path needed)
// and test_fail_ctrl to the projection where none are set (heap stable).
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  // The gc-state byte lives at a fixed offset in the current thread.
  Node* thread = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr = AddPNode::make_off_heap(thread, gc_state_offset);
  Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                 DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                 TypeInt::BYTE, MemNode::unordered);
  // (gc_state & flags) != 0 selects the barrier slow path.
  Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

  // GC being active is the unlikely case.
  IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(gc_state_iff);
  test_fail_ctrl = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread, old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state, old_ctrl);
  phase->register_new_node(gc_state_and, old_ctrl);
  phase->register_new_node(gc_state_cmp, old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  // The offset is a control-independent constant; pin it at the root.
  phase->set_root_as_ctrl(gc_state_offset);

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}
903
904 void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
905 Node* old_ctrl = ctrl;
906 PhaseIterGVN& igvn = phase->igvn();
907
908 const Type* val_t = igvn.type(val);
909 if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
910 Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
911 Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
912
913 IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
914 ctrl = new IfTrueNode(null_iff);
915 null_ctrl = new IfFalseNode(null_iff);
916
917 IdealLoopTree* loop = phase->get_loop(old_ctrl);
918 phase->register_control(null_iff, loop, old_ctrl);
919 phase->register_control(ctrl, loop, null_iff);
920 phase->register_control(null_ctrl, loop, null_iff);
921
922 phase->register_new_node(null_cmp, old_ctrl);
923 phase->register_new_node(null_test, old_ctrl);
924 }
925 }
926
// Emit the "is val in the collection set?" test. Membership is recorded in a
// byte-per-region table indexed by (oop address >> region-size shift). On
// return, ctrl is advanced to the in-cset projection (evacuation may be
// needed) and not_cset_ctrl to the projection where no barrier work remains.
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  // Compute the region index from the raw oop bits.
  Node* raw_val = new CastP2XNode(old_ctrl, val);
  Node* region_size_shift = nullptr;
  if (AOTCodeCache::is_on_for_dump()) {
    // When dumping AOT code the region size is not known at compile time:
    // load the shift from the AOT runtime-constants area instead of
    // embedding a constant that may not match the loading JVM.
    Node* aot_addr = igvn.makecon(TypeRawPtr::make(AOTRuntimeConstants::grain_shift_address()));
    region_size_shift = new LoadINode(old_ctrl, raw_mem, aot_addr,
                                      DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                      TypeInt::INT, MemNode::unordered);
    phase->register_new_node(region_size_shift, old_ctrl);
  } else {
    region_size_shift = igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint());
  }
  Node* cset_idx = new URShiftXNode(raw_val, region_size_shift);

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr = nullptr;
  if (AOTCodeCache::is_on_for_dump()) {
    // Same consideration for the table base: load it at runtime.
    Node* aot_addr = igvn.makecon(TypeRawPtr::make(AOTRuntimeConstants::cset_base_address()));
    cset_addr_ptr = new LoadPNode(old_ctrl, raw_mem, aot_addr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeRawPtr::NOTNULL, MemNode::unordered);
    phase->register_new_node(cset_addr_ptr, old_ctrl);
  } else {
    cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  }
  // base + index computed in the integer domain, cast back for the load.
  Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

  Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeInt::BYTE, MemNode::unordered);
  // A non-zero table byte means the region is in the collection set.
  Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

  // In-cset is the unlikely case.
  IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(cset_iff);
  not_cset_ctrl = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  phase->set_root_as_ctrl(cset_addr_ptr);

  phase->register_new_node(raw_val, old_ctrl);
  phase->register_new_node(cset_idx, old_ctrl);
  phase->register_new_node(cset_addr, old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr, old_ctrl);
  phase->register_new_node(cset_load, old_ctrl);
  phase->register_new_node(cset_cmp, old_ctrl);
  phase->register_new_node(cset_bool, old_ctrl);
}
987
988 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
989 DecoratorSet decorators, PhaseIdealLoop* phase) {
990 IdealLoopTree*loop = phase->get_loop(ctrl);
991 const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
992
993 address calladdr = nullptr;
994 const char* name = nullptr;
995 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
996 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
997 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
998 bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
999 bool is_narrow = UseCompressedOops && !is_native;
1000 if (is_strong) {
1001 if (is_narrow) {
1002 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
1003 name = "load_reference_barrier_strong_narrow";
1004 } else {
1005 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
1006 name = "load_reference_barrier_strong";
1007 }
1008 } else if (is_weak) {
1009 if (is_narrow) {
1010 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
1011 name = "load_reference_barrier_weak_narrow";
1012 } else {
1013 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
1014 name = "load_reference_barrier_weak";
1015 }
1016 } else {
1017 assert(is_phantom, "only remaining strength");
1018 if (is_narrow) {
1019 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1020 name = "load_reference_barrier_phantom_narrow";
1021 } else {
1022 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1023 name = "load_reference_barrier_phantom";
1024 }
1025 }
1026 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1027
1028 call->init_req(TypeFunc::Control, ctrl);
1029 call->init_req(TypeFunc::I_O, phase->C->top());
1030 call->init_req(TypeFunc::Memory, phase->C->top());
1031 call->init_req(TypeFunc::FramePtr, phase->C->top());
1032 call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1033 call->init_req(TypeFunc::Parms, val);
1034 call->init_req(TypeFunc::Parms+1, load_addr);
1035 phase->register_control(call, loop, ctrl);
1036 ctrl = new ProjNode(call, TypeFunc::Control);
1037 phase->register_control(ctrl, loop, call);
1038 val = new ProjNode(call, TypeFunc::Parms);
1039 phase->register_new_node(val, call);
1040 val = new CheckCastPPNode(ctrl, val, obj_type);
1041 phase->register_new_node(val, ctrl);
1042 }
1043
1044 void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
1045 nodes_above_barrier.clear();
1046 if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1047 nodes_above_barrier.push(init_raw_mem);
1048 }
1049 for (uint next = 0; next < nodes_above_barrier.size(); next++) {
1050 Node* n = nodes_above_barrier.at(next);
1051 // Take anti-dependencies into account
1052 maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
1053 push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
1054 }
1055 }
1056
// After a barrier pinned at ctrl has been expanded into the control flow
// merging at region, re-attach every node that was control dependent on ctrl
// and must execute after the barrier to region instead. Nodes collected into
// nodes_above_barrier (the barrier's input memory state and what feeds it)
// stay in place. last bounds the scan to nodes that existed before
// expansion. Data nodes that move are pushed on uses for further processing
// by the caller.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !u->depends_only_on_test() && // preserve dependency on test
        !nodes_above_barrier.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      if (old_c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        // Swing u's control edge from ctrl to region.
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        // replace_edge removed u from ctrl's out list: rewind the iterator.
        --i, imax -= nb;
      }
    }
  }
}
1097
1098 static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
1099 Node* region = nullptr;
1100 while (c != ctrl) {
1101 if (c->is_Region()) {
1102 region = c;
1103 }
1104 c = phase->idom(c);
1105 }
1106 assert(region != nullptr, "");
1107 Node* phi = new PhiNode(region, n->bottom_type());
1108 for (uint j = 1; j < region->req(); j++) {
1109 Node* in = region->in(j);
1110 if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
1111 phi->init_req(j, n);
1112 } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
1113 phi->init_req(j, n_clone);
1114 } else {
1115 phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
1116 }
1117 }
1118 phase->register_new_node(phi, region);
1119 return phi;
1120 }
1121
1122 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1123 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1124
1125 Unique_Node_List uses;
1126 Node_Stack stack(0);
1127 Node_List clones;
1128 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1129 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1130
1131 Node* ctrl = phase->get_ctrl(lrb);
1132 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1133
1134 CallStaticJavaNode* unc = nullptr;
1135 Node* unc_ctrl = nullptr;
1136 Node* uncasted_val = val;
1137
1138 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1139 Node* u = lrb->fast_out(i);
1140 if (u->Opcode() == Op_CastPP &&
1141 u->in(0) != nullptr &&
1142 phase->is_dominator(u->in(0), ctrl)) {
1143 const Type* u_t = phase->igvn().type(u);
1144
1145 if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1146 u->in(0)->Opcode() == Op_IfTrue &&
1147 u->in(0)->as_Proj()->is_uncommon_trap_if_pattern() &&
1148 u->in(0)->in(0)->is_If() &&
1149 u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1150 u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1151 u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1152 u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1153 u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1154 IdealLoopTree* loop = phase->get_loop(ctrl);
1155 IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1156
1157 if (!unc_loop->is_member(loop)) {
1158 continue;
1159 }
1160
1161 Node* branch = no_branches(ctrl, u->in(0), false, phase);
1162 assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1163 if (branch == NodeSentinel) {
1164 continue;
1165 }
1166
1167 Node* iff = u->in(0)->in(0);
1168 Node* bol = iff->in(1)->clone();
1169 Node* cmp = bol->in(1)->clone();
1170 cmp->set_req(1, lrb);
1171 bol->set_req(1, cmp);
1172 phase->igvn().replace_input_of(iff, 1, bol);
1173 phase->set_ctrl(lrb, iff->in(0));
1174 phase->register_new_node(cmp, iff->in(0));
1175 phase->register_new_node(bol, iff->in(0));
1176 break;
1177 }
1178 }
1179 }
1180 // Load barrier on the control output of a call
1181 if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1182 CallJavaNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1183 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1184 // The rethrow call may have too many projections to be
1185 // properly handled here. Given there's no reason for a
1186 // barrier to depend on the call, move it above the call
1187 stack.push(lrb, 0);
1188 do {
1189 Node* n = stack.node();
1190 uint idx = stack.index();
1191 if (idx < n->req()) {
1192 Node* in = n->in(idx);
1193 stack.set_index(idx+1);
1194 if (in != nullptr) {
1195 if (phase->has_ctrl(in)) {
1196 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1197 #ifdef ASSERT
1198 for (uint i = 0; i < stack.size(); i++) {
1199 assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1200 }
1201 #endif
1202 stack.push(in, 0);
1203 }
1204 } else {
1205 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1206 }
1207 }
1208 } else {
1209 phase->set_ctrl(n, call->in(0));
1210 stack.pop();
1211 }
1212 } while(stack.size() > 0);
1213 continue;
1214 }
1215 CallProjections projs;
1216 call->extract_projections(&projs, false, false);
1217
1218 // If this is a runtime call, it doesn't have an exception handling path
1219 if (projs.fallthrough_catchproj == nullptr) {
1220 assert(call->method() == nullptr, "should be runtime call");
1221 assert(projs.catchall_catchproj == nullptr, "runtime call should not have catch all projection");
1222 continue;
1223 }
1224
1225 // Otherwise, clone the barrier so there's one for the fallthrough and one for the exception handling path
1226 #ifdef ASSERT
1227 VectorSet cloned;
1228 #endif
1229 Node* lrb_clone = lrb->clone();
1230 phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1231 phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1232
1233 stack.push(lrb, 0);
1234 clones.push(lrb_clone);
1235
1236 do {
1237 assert(stack.size() == clones.size(), "");
1238 Node* n = stack.node();
1239 #ifdef ASSERT
1240 if (n->is_Load()) {
1241 Node* mem = n->in(MemNode::Memory);
1242 for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1243 Node* u = mem->fast_out(j);
1244 assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1245 }
1246 }
1247 #endif
1248 uint idx = stack.index();
1249 Node* n_clone = clones.at(clones.size()-1);
1250 if (idx < n->outcnt()) {
1251 Node* u = n->raw_out(idx);
1252 Node* c = phase->ctrl_or_self(u);
1253 if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1254 stack.set_index(idx+1);
1255 assert(!u->is_CFG(), "");
1256 stack.push(u, 0);
1257 assert(!cloned.test_set(u->_idx), "only one clone");
1258 Node* u_clone = u->clone();
1259 int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1260 assert(nb > 0, "should have replaced some uses");
1261 phase->register_new_node(u_clone, projs.catchall_catchproj);
1262 clones.push(u_clone);
1263 phase->set_ctrl(u, projs.fallthrough_catchproj);
1264 } else {
1265 bool replaced = false;
1266 if (u->is_Phi()) {
1267 for (uint k = 1; k < u->req(); k++) {
1268 if (u->in(k) == n) {
1269 if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1270 phase->igvn().replace_input_of(u, k, n_clone);
1271 replaced = true;
1272 } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1273 phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1274 replaced = true;
1275 }
1276 }
1277 }
1278 } else {
1279 if (phase->is_dominator(projs.catchall_catchproj, c)) {
1280 phase->igvn().rehash_node_delayed(u);
1281 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1282 assert(nb > 0, "should have replaced some uses");
1283 replaced = true;
1284 } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1285 if (u->is_If()) {
1286 // Can't break If/Bool/Cmp chain
1287 assert(n->is_Bool(), "unexpected If shape");
1288 assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1289 assert(n_clone->is_Bool(), "unexpected clone");
1290 assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1291 Node* bol_clone = n->clone();
1292 Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1293 bol_clone->set_req(1, cmp_clone);
1294
1295 Node* nn = stack.node_at(stack.size()-3);
1296 Node* nn_clone = clones.at(clones.size()-3);
1297 assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1298
1299 int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1300 &phase->igvn());
1301 assert(nb > 0, "should have replaced some uses");
1302
1303 phase->register_new_node(bol_clone, u->in(0));
1304 phase->register_new_node(cmp_clone, u->in(0));
1305
1306 phase->igvn().replace_input_of(u, 1, bol_clone);
1307
1308 } else {
1309 phase->igvn().rehash_node_delayed(u);
1310 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1311 assert(nb > 0, "should have replaced some uses");
1312 }
1313 replaced = true;
1314 }
1315 }
1316 if (!replaced) {
1317 stack.set_index(idx+1);
1318 }
1319 }
1320 } else {
1321 stack.pop();
1322 clones.pop();
1323 }
1324 } while (stack.size() > 0);
1325 assert(stack.size() == 0 && clones.size() == 0, "");
1326 }
1327 }
1328
1329 for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1330 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1331 Node* ctrl = phase->get_ctrl(lrb);
1332 IdealLoopTree* loop = phase->get_loop(ctrl);
1333 Node* head = loop->head();
1334 if (head->is_OuterStripMinedLoop()) {
1335 // Expanding a barrier here will break loop strip mining
1336 // verification. Transform the loop so the loop nest doesn't
1337 // appear as strip mined.
1338 OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1339 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1340 }
1341 if (head->is_BaseCountedLoop() && ctrl->is_IfProj() && ctrl->in(0)->is_BaseCountedLoopEnd() &&
1342 head->as_BaseCountedLoop()->loopexit() == ctrl->in(0)) {
1343 Node* entry = head->in(LoopNode::EntryControl);
1344 Node* backedge = head->in(LoopNode::LoopBackControl);
1345 Node* new_head = new LoopNode(entry, backedge);
1346 phase->register_control(new_head, phase->get_loop(entry), entry);
1347 phase->replace_node_and_forward_ctrl(head, new_head);
1348 }
1349 }
1350
1351 // Expand load-reference-barriers
1352 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1353 Unique_Node_List nodes_above_barriers;
1354 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1355 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1356 uint last = phase->C->unique();
1357 Node* ctrl = phase->get_ctrl(lrb);
1358 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1359
1360 Node* orig_ctrl = ctrl;
1361
1362 Node* raw_mem = fixer.find_mem(ctrl, lrb);
1363 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1364
1365 IdealLoopTree *loop = phase->get_loop(ctrl);
1366
1367 Node* heap_stable_ctrl = nullptr;
1368 Node* null_ctrl = nullptr;
1369
1370 assert(val->bottom_type()->make_oopptr(), "need oop");
1371 assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1372
1373 enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1374 Node* region = new RegionNode(PATH_LIMIT);
1375 Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1376
1377 // Stable path.
1378 int flags = ShenandoahHeap::HAS_FORWARDED;
1379 if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1380 flags |= ShenandoahHeap::WEAK_ROOTS;
1381 }
1382 test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1383 IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1384
1385 // Heap stable case
1386 region->init_req(_heap_stable, heap_stable_ctrl);
1387 val_phi->init_req(_heap_stable, val);
1388
1389 // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1390 // even for non-cset objects to prevent resurrection of such objects.
1391 // Wires !in_cset(obj) to slot 2 of region and phis
1392 Node* not_cset_ctrl = nullptr;
1393 if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1394 test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1395 }
1396 if (not_cset_ctrl != nullptr) {
1397 region->init_req(_not_cset, not_cset_ctrl);
1398 val_phi->init_req(_not_cset, val);
1399 } else {
1400 region->del_req(_not_cset);
1401 val_phi->del_req(_not_cset);
1402 }
1403
1404 // Resolve object when orig-value is in cset.
1405 // Make the unconditional resolve for fwdptr.
1406
1407 // Call lrb-stub and wire up that path in slots 4
1408 Node* result_mem = nullptr;
1409
1410 Node* addr;
1411 {
1412 VectorSet visited;
1413 addr = get_load_addr(phase, visited, lrb);
1414 }
1415 if (addr->Opcode() == Op_AddP) {
1416 Node* orig_base = addr->in(AddPNode::Base);
1417 Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::DependencyType::NonFloatingNarrowing);
1418 phase->register_new_node(base, ctrl);
1419 if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
1420 // Field access
1421 addr = addr->clone();
1422 addr->set_req(AddPNode::Base, base);
1423 addr->set_req(AddPNode::Address, base);
1424 phase->register_new_node(addr, ctrl);
1425 } else {
1426 Node* addr2 = addr->in(AddPNode::Address);
1427 if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1428 addr2->in(AddPNode::Base) == orig_base) {
1429 addr2 = addr2->clone();
1430 addr2->set_req(AddPNode::Base, base);
1431 addr2->set_req(AddPNode::Address, base);
1432 phase->register_new_node(addr2, ctrl);
1433 addr = addr->clone();
1434 addr->set_req(AddPNode::Base, base);
1435 addr->set_req(AddPNode::Address, addr2);
1436 phase->register_new_node(addr, ctrl);
1437 }
1438 }
1439 }
1440 call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1441 region->init_req(_evac_path, ctrl);
1442 val_phi->init_req(_evac_path, val);
1443
1444 phase->register_control(region, loop, heap_stable_iff);
1445 Node* out_val = val_phi;
1446 phase->register_new_node(val_phi, region);
1447
1448 fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
1449
1450 ctrl = orig_ctrl;
1451
1452 phase->igvn().replace_node(lrb, out_val);
1453
1454 follow_barrier_uses(out_val, ctrl, uses, phase);
1455
1456 for(uint next = 0; next < uses.size(); next++ ) {
1457 Node *n = uses.at(next);
1458 assert(phase->get_ctrl(n) == ctrl, "bad control");
1459 assert(n != raw_mem, "should leave input raw mem above the barrier");
1460 phase->set_ctrl(n, region);
1461 follow_barrier_uses(n, ctrl, uses, phase);
1462 }
1463 fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1464 }
1465 // Done expanding load-reference-barriers.
1466 assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1467 }
1468
// Walk backwards from a barrier's value input to recover the address the
// value was loaded from, so the runtime stub can heal that location with the
// evacuated oop. Returns nullptr when the walk revisits a node (phi cycle),
// and a null constant (zerocon) when the address is unknown or ambiguous --
// in that case the stub performs no healing store.
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    // Already seen (cycle through a phi): report "no opinion".
    return nullptr;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      // Casts and compressed-oop conversions don't change the location.
      return get_load_addr(phase, visited, in->in(1));
    case Op_LoadN:
    case Op_LoadP:
      // Found the originating load: its address is the healing target.
      return in->in(MemNode::Address);
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      // Those instructions would just have stored a different
      // value into the field. No use to attempt to fix it at this point.
      return phase->igvn().zerocon(T_OBJECT);
    case Op_CMoveP:
    case Op_CMoveN: {
      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
      // Handle unambiguous cases: single address reported on both branches.
      if (t != nullptr && f == nullptr) return t;
      if (t == nullptr && f != nullptr) return f;
      if (t != nullptr && t == f) return t;
      // Ambiguity.
      return phase->igvn().zerocon(T_OBJECT);
    }
    case Op_Phi: {
      // All phi inputs must agree on a single address, otherwise give up.
      Node* addr = nullptr;
      for (uint i = 1; i < in->req(); i++) {
        Node* addr1 = get_load_addr(phase, visited, in->in(i));
        if (addr == nullptr) {
          addr = addr1;
        }
        if (addr != addr1) {
          return phase->igvn().zerocon(T_OBJECT);
        }
      }
      return addr;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      // Look through a stacked barrier.
      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
    case Op_CallDynamicJava:
    case Op_CallLeaf:
    case Op_CallStaticJava:
    case Op_ConN:
    case Op_ConP:
    case Op_Parm:
    case Op_CreateEx:
      // Values that did not come from a field load: nothing to heal.
      return phase->igvn().zerocon(T_OBJECT);
    default:
#ifdef ASSERT
      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
#endif
      return phase->igvn().zerocon(T_OBJECT);
  }

}
1535
1536 #ifdef ASSERT
1537 static bool has_never_branch(Node* root) {
1538 for (uint i = 1; i < root->req(); i++) {
1539 Node* in = root->in(i);
1540 if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
1541 return true;
1542 }
1543 }
1544 return false;
1545 }
1546 #endif
1547
1548 void MemoryGraphFixer::collect_memory_nodes() {
1549 Node_Stack stack(0);
1550 VectorSet visited;
1551 Node_List regions;
1552
1553 // Walk the raw memory graph and create a mapping from CFG node to
1554 // memory node. Exclude phis for now.
1555 stack.push(_phase->C->root(), 1);
1556 do {
1557 Node* n = stack.node();
1558 int opc = n->Opcode();
1559 uint i = stack.index();
1560 if (i < n->req()) {
1561 Node* mem = nullptr;
1562 if (opc == Op_Root) {
1563 Node* in = n->in(i);
1564 int in_opc = in->Opcode();
1565 if (in_opc == Op_Return || in_opc == Op_Rethrow) {
1566 mem = in->in(TypeFunc::Memory);
1567 } else if (in_opc == Op_Halt) {
1568 if (in->in(0)->is_Region()) {
1569 Node* r = in->in(0);
1570 for (uint j = 1; j < r->req(); j++) {
1571 assert(!r->in(j)->is_NeverBranch(), "");
1572 }
1573 } else {
1574 Node* proj = in->in(0);
1575 assert(proj->is_Proj(), "");
1576 Node* in = proj->in(0);
1577 assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
1578 if (in->is_CallStaticJava()) {
1579 mem = in->in(TypeFunc::Memory);
1580 } else if (in->Opcode() == Op_Catch) {
1581 Node* call = in->in(0)->in(0);
1582 assert(call->is_Call(), "");
1583 mem = call->in(TypeFunc::Memory);
1584 } else if (in->is_NeverBranch()) {
1585 mem = collect_memory_for_infinite_loop(in);
1586 }
1587 }
1588 } else {
1589 #ifdef ASSERT
1590 n->dump();
1591 in->dump();
1592 #endif
1593 ShouldNotReachHere();
1594 }
1595 } else {
1596 assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
1597 assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
1598 mem = n->in(i);
1599 }
1600 i++;
1601 stack.set_index(i);
1602 if (mem == nullptr) {
1603 continue;
1604 }
1605 for (;;) {
1606 if (visited.test_set(mem->_idx) || mem->is_Start()) {
1607 break;
1608 }
1609 if (mem->is_Phi()) {
1610 stack.push(mem, 2);
1611 mem = mem->in(1);
1612 } else if (mem->is_Proj()) {
1613 stack.push(mem, mem->req());
1614 mem = mem->in(0);
1615 } else if (mem->is_SafePoint() || mem->is_MemBar()) {
1616 mem = mem->in(TypeFunc::Memory);
1617 } else if (mem->is_MergeMem()) {
1618 MergeMemNode* mm = mem->as_MergeMem();
1619 mem = mm->memory_at(_alias);
1620 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1621 assert(_alias == Compile::AliasIdxRaw, "");
1622 stack.push(mem, mem->req());
1623 mem = mem->in(MemNode::Memory);
1624 } else {
1625 #ifdef ASSERT
1626 mem->dump();
1627 #endif
1628 ShouldNotReachHere();
1629 }
1630 }
1631 } else {
1632 if (n->is_Phi()) {
1633 // Nothing
1634 } else if (!n->is_Root()) {
1635 Node* c = get_ctrl(n);
1636 _memory_nodes.map(c->_idx, n);
1637 }
1638 stack.pop();
1639 }
1640 } while(stack.is_nonempty());
1641
1642 // Iterate over CFG nodes in rpo and propagate memory state to
1643 // compute memory state at regions, creating new phis if needed.
1644 Node_List rpo_list;
1645 visited.clear();
1646 _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
1647 Node* root = rpo_list.pop();
1648 assert(root == _phase->C->root(), "");
1649
1650 const bool trace = false;
1651 #ifdef ASSERT
1652 if (trace) {
1653 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1654 Node* c = rpo_list.at(i);
1655 if (_memory_nodes[c->_idx] != nullptr) {
1656 tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
1657 }
1658 }
1659 }
1660 #endif
1661 uint last = _phase->C->unique();
1662
1663 #ifdef ASSERT
1664 uint16_t max_depth = 0;
1665 for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
1666 IdealLoopTree* lpt = iter.current();
1667 max_depth = MAX2(max_depth, lpt->_nest);
1668 }
1669 #endif
1670
1671 bool progress = true;
1672 int iteration = 0;
1673 Node_List dead_phis;
1674 while (progress) {
1675 progress = false;
1676 iteration++;
1677 assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1678 if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
1679
1680 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1681 Node* c = rpo_list.at(i);
1682
1683 Node* prev_mem = _memory_nodes[c->_idx];
1684 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1685 Node* prev_region = regions[c->_idx];
1686 Node* unique = nullptr;
1687 for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
1688 Node* m = _memory_nodes[c->in(j)->_idx];
1689 assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
1690 if (m != nullptr) {
1691 if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
1692 assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1693 // continue
1694 } else if (unique == nullptr) {
1695 unique = m;
1696 } else if (m == unique) {
1697 // continue
1698 } else {
1699 unique = NodeSentinel;
1700 }
1701 }
1702 }
1703 assert(unique != nullptr, "empty phi???");
1704 if (unique != NodeSentinel) {
1705 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
1706 dead_phis.push(prev_region);
1707 }
1708 regions.map(c->_idx, unique);
1709 } else {
1710 Node* phi = nullptr;
1711 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
1712 phi = prev_region;
1713 for (uint k = 1; k < c->req(); k++) {
1714 Node* m = _memory_nodes[c->in(k)->_idx];
1715 assert(m != nullptr, "expect memory state");
1716 phi->set_req(k, m);
1717 }
1718 } else {
1719 for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
1720 Node* u = c->fast_out(j);
1721 if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1722 (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
1723 phi = u;
1724 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
1725 Node* m = _memory_nodes[c->in(k)->_idx];
1726 assert(m != nullptr, "expect memory state");
1727 if (u->in(k) != m) {
1728 phi = NodeSentinel;
1729 }
1730 }
1731 }
1732 }
1733 if (phi == NodeSentinel) {
1734 phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
1735 for (uint k = 1; k < c->req(); k++) {
1736 Node* m = _memory_nodes[c->in(k)->_idx];
1737 assert(m != nullptr, "expect memory state");
1738 phi->init_req(k, m);
1739 }
1740 }
1741 }
1742 if (phi != nullptr) {
1743 regions.map(c->_idx, phi);
1744 } else {
1745 assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1746 }
1747 }
1748 Node* current_region = regions[c->_idx];
1749 if (current_region != prev_region) {
1750 progress = true;
1751 if (prev_region == prev_mem) {
1752 _memory_nodes.map(c->_idx, current_region);
1753 }
1754 }
1755 } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
1756 Node* m = _memory_nodes[_phase->idom(c)->_idx];
1757 assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
1758 if (m != prev_mem) {
1759 _memory_nodes.map(c->_idx, m);
1760 progress = true;
1761 }
1762 }
1763 #ifdef ASSERT
1764 if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
1765 #endif
1766 }
1767 }
1768
1769 // Replace existing phi with computed memory state for that region
1770 // if different (could be a new phi or a dominating memory node if
1771 // that phi was found to be useless).
1772 while (dead_phis.size() > 0) {
1773 Node* n = dead_phis.pop();
1774 n->replace_by(_phase->C->top());
1775 n->destruct(&_phase->igvn());
1776 }
1777 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1778 Node* c = rpo_list.at(i);
1779 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1780 Node* n = regions[c->_idx];
1781 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1782 if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
1783 _phase->register_new_node(n, c);
1784 }
1785 }
1786 }
1787 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1788 Node* c = rpo_list.at(i);
1789 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1790 Node* n = regions[c->_idx];
1791 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1792 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
1793 Node* u = c->fast_out(i);
1794 if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1795 u != n) {
1796 assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
1797 if (u->adr_type() == TypePtr::BOTTOM) {
1798 fix_memory_uses(u, n, n, c);
1799 } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1800 _phase->igvn().replace_node(u, n);
1801 --i; --imax;
1802 }
1803 }
1804 }
1805 }
1806 }
1807 }
1808
// An infinite loop has no exit through which the regular traversal could
// recover a memory state. Find the memory state for the loop: either a
// memory Phi on the loop head region or, failing that, the memory input
// of a safepoint inside the loop body.
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
  Node* mem = nullptr;
  Node* head = in->in(0);
  assert(head->is_Region(), "unexpected infinite loop graph shape");

  // First look for a memory Phi on the loop head. A phi on the tracked
  // alias class is preferred over a bottom (all slices) phi.
  Node* phi_mem = nullptr;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* u = head->fast_out(j);
    if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
      if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
        phi_mem = u;
      } else if (u->adr_type() == TypePtr::BOTTOM) {
        assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
        if (phi_mem == nullptr) {
          phi_mem = u;
        }
      }
    }
  }
  if (phi_mem == nullptr) {
    // No phi on the head: walk the loop body (depth-first over control
    // inputs, regions pushed on the stack) looking for a safepoint whose
    // memory input gives the loop's memory state.
    ResourceMark rm;
    Node_Stack stack(0);
    stack.push(head, 1);
    do {
      Node* n = stack.node();
      uint i = stack.index();
      if (i >= n->req()) {
        stack.pop();
      } else {
        stack.set_index(i + 1);
        Node* c = n->in(i);
        assert(c != head, "should have found a safepoint on the way");
        // Only follow paths that stay inside the loop (dominated by head).
        if (stack.size() != 1 || _phase->is_dominator(head, c)) {
          for (;;) {
            if (c->is_Region()) {
              stack.push(c, 1);
              break;
            } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
              Node* m = c->in(TypeFunc::Memory);
              if (m->is_MergeMem()) {
                m = m->as_MergeMem()->memory_at(_alias);
              }
              assert(mem == nullptr || mem == m, "several memory states");
              mem = m;
              break;
            } else {
              assert(c != c->in(0), "");
              c = c->in(0);
            }
          }
        }
      }
    } while (stack.size() > 0);
    assert(mem != nullptr, "should have found safepoint");
  } else {
    mem = phi_mem;
  }
  return mem;
}
1869
1870 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
1871 Node* c = _phase->get_ctrl(n);
1872 if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
1873 assert(c == n->in(0), "");
1874 CallNode* call = c->as_Call();
1875 CallProjections projs;
1876 call->extract_projections(&projs, true, false);
1877 if (projs.catchall_memproj != nullptr) {
1878 if (projs.fallthrough_memproj == n) {
1879 c = projs.fallthrough_catchproj;
1880 } else {
1881 assert(projs.catchall_memproj == n, "");
1882 c = projs.catchall_catchproj;
1883 }
1884 }
1885 }
1886 return c;
1887 }
1888
1889 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
1890 if (_phase->has_ctrl(n))
1891 return get_ctrl(n);
1892 else {
1893 assert (n->is_CFG(), "must be a CFG node");
1894 return n;
1895 }
1896 }
1897
1898 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
1899 return m != nullptr && get_ctrl(m) == c;
1900 }
1901
// Return the memory state for the tracked alias class that is live at
// ctrl. If n is non-null it is a node at ctrl and the returned memory
// state must dominate n.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
  assert(!ctrl->is_Call() || ctrl == n, "projection expected");
#ifdef ASSERT
  if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
      (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
    CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
    int mems = 0;
    for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
      Node* u = call->fast_out(i);
      if (u->bottom_type() == Type::MEMORY) {
        mems++;
      }
    }
    assert(mems <= 1, "No node right after call if multiple mem projections");
  }
#endif
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  // Walk up the dominator tree until a valid memory state is recorded
  // (with a special stop for catch projections whose memory is pinned
  // at the call).
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != nullptr && mem_is_valid(mem, c)) {
    // Several memory operations can share a block: step back through
    // the memory chain until the state dominates n within the block.
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // Walked out of the block: resume the dominator-tree walk.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}
1944
1945 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
1946 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1947 Node* use = region->fast_out(i);
1948 if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
1949 (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
1950 return true;
1951 }
1952 }
1953 return false;
1954 }
1955
// Rewire the memory graph after new code with a memory side effect was
// added: uses of the memory state 'mem' that are reached from 'new_ctrl'
// must consume 'new_mem' instead. 'mem_for_ctrl' is the memory state at
// 'ctrl' for the alias class this fixer tracks; 'uses' is caller-provided
// scratch space for the CFG walk.
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // The state at ctrl hangs below 'mem' on a chain of raw memory
    // operations: walk up to the node directly below 'mem' and splice
    // 'new_mem' in at that point.
    Node* old = mem_for_ctrl;
    Node* prev = nullptr;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != nullptr, "");
    if (new_ctrl != ctrl) {
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    uint input = (uint)MemNode::Memory;
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // Propagate the new memory state downward through the CFG from
    // new_ctrl, creating phis at merge points where needed.
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                // All paths into u see new_mem: no phi needed here.
                create_phi = false;
              }
              if (create_phi) {
                // New phi inputs are filled in after the walk (below).
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  // A memory chain already lives at u: hook the new phi
                  // in below the last node of that chain at u.
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
                    Node* next = nullptr;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else {
                      assert(m->is_Mem() || m->is_LoadStore(), "");
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore(), "");
                  uint input = (uint)MemNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
            uses.push(u);
          }
        }
      }
    }
    // Now that all phis exist, fill in their inputs with the memory
    // state found at each predecessor.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), nullptr);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  // Finally, retarget the uses of 'mem' (only nodes that existed before
  // this pass, i.e. _idx < last) to the state computed for their control.
  uint last = _phase->C->unique();
  MergeMemNode* mm = nullptr;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  // Process loads first to not miss an anti-dependency: if the memory
  // edge of a store is updated before a load is processed then an
  // anti-dependency may be missed.
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
      Node* m = find_mem(_phase->get_ctrl(u), u);
      if (m != mem) {
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, MemNode::Memory, m);
        --i;
      }
    }
  }
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i;
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          // The MergeMem may need a different slice per use: clone it
          // (or update in place) for each use with a different state.
          MergeMemNode* newmm = nullptr;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              assert(uu->adr_type() == TypePtr::BOTTOM, "");
              Node* region = uu->in(0);
              int nb = 0;
              for (uint k = 1; k < uu->req(); k++) {
                if (uu->in(k) == u) {
                  Node* m = find_mem(region->in(k), nullptr);
                  if (m != mem) {
                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
              }
              if (nb > 0) {
                --j;
              }
            } else {
              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
              if (m != mem) {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
          Node* region = u->in(0);
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem) {
              Node* m = find_mem(region->in(j), nullptr);
              Node* nnew = m;
              if (m != mem) {
                if (u->adr_type() == TypePtr::BOTTOM) {
                  // Bottom phi merges all slices: wrap the new state in
                  // a MergeMem before feeding it in.
                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
                  nnew = mm;
                }
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
                _phase->igvn().replace_input_of(u, j, nnew);
                replaced = true;
              }
            }
          }
          if (replaced) {
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
          --i;
        }
      } else if (u->adr_type() != TypePtr::BOTTOM &&
                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        assert(m != mem, "");
        // u is on the wrong slice...
        assert(u->is_ClearArray(), "");
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
        --i;
      }
    }
  }
#ifdef ASSERT
  assert(new_mem->outcnt() > 0, "");
  for (int i = 0; i < phis.length(); i++) {
    Node* n = phis.at(i);
    assert(n->outcnt() > 0, "new phi must have uses now");
  }
#endif
}
2210
2211 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2212 if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2213 _memory_nodes.map(ctrl->_idx, mem);
2214 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2215 }
2216 }
2217
2218 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2219 MergeMemNode* mm = MergeMemNode::make(mem);
2220 mm->set_memory_at(_alias, rep_proj);
2221 _phase->register_new_node(mm, rep_ctrl);
2222 return mm;
2223 }
2224
// Clone MergeMem u — or update it in place when this is its only use —
// so that its slice for the tracked alias class becomes rep_proj. 'i'
// iterates over mem's uses in the caller and is adjusted when an edge
// to mem is replaced here.
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = nullptr;
  MergeMemNode* u_mm = u->as_MergeMem();
  Node* c = _phase->get_ctrl(u);
  // Pin the (possibly updated) MergeMem at whichever of its current
  // control and rep_ctrl is dominated by the other.
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: safe to mutate u directly instead of cloning.
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    if ((uint)_alias >= u->req()) {
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}
2269
2270 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2271 if (phi->adr_type() == TypePtr::BOTTOM) {
2272 Node* region = phi->in(0);
2273 for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2274 Node* uu = region->fast_out(j);
2275 if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2276 return false;
2277 }
2278 }
2279 return true;
2280 }
2281 return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2282 }
2283
// Retarget uses of the memory state 'mem' that are dominated by
// rep_ctrl so they consume rep_proj (the memory side effect of newly
// added code) instead; 'replacement' itself is skipped, as are nodes
// created after this pass started (_idx >= last).
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase-> C->unique();
  MergeMemNode* mm = nullptr;
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = nullptr;
          // Only uses of the MergeMem on paths dominated by rep_ctrl
          // switch to a clone carrying rep_proj on our slice.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                for (uint k = 1; k < uu->req(); k++) {
                  // Phi inputs are checked against the matching region
                  // predecessor, not the phi's own control.
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == nullptr) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == nullptr) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // Bottom phi merges all slices: feed it a MergeMem with
                // only our slice replaced.
                if (mm == nullptr) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            --i;
          }

        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "%s", u->Name());
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == nullptr) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}
2379
// A load reference barrier node keeps its control input (pinning it) and
// the loaded oop it protects. It registers itself with the barrier set
// C2 state so the expansion pass can find all barriers later.
ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
  : Node(ctrl, obj), _decorators(decorators) {
  ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
}
2384
// Decorators recorded at barrier creation; they describe the access
// kind (strong/weak/phantom/unknown reference, native) — see
// mask_decorators() below for the bits that matter for node identity.
DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
  return _decorators;
}
2388
// Report the derived size: this node carries extra state (_decorators)
// beyond the base Node layout.
uint ShenandoahLoadReferenceBarrierNode::size_of() const {
  return sizeof(*this);
}
2392
2393 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2394 return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2395 }
2396
2397 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2398 uint hash = Node::hash();
2399 hash += mask_decorators(_decorators);
2400 return hash;
2401 }
2402
2403 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
2404 return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2405 mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2406 }
2407
2408 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2409 if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2410 return Type::TOP;
2411 }
2412 const Type* t = in(ValueIn)->bottom_type();
2413 if (t == TypePtr::NULL_PTR) {
2414 return t;
2415 }
2416
2417 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2418 return t;
2419 }
2420
2421 return t->meet(TypePtr::NULL_PTR);
2422 }
2423
2424 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2425 // Either input is TOP ==> the result is TOP
2426 const Type *t2 = phase->type(in(ValueIn));
2427 if( t2 == Type::TOP ) return Type::TOP;
2428
2429 if (t2 == TypePtr::NULL_PTR) {
2430 return t2;
2431 }
2432
2433 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2434 return t2;
2435 }
2436
2437 return t2->meet(TypePtr::NULL_PTR);
2438 }
2439
2440 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2441 Node* value = in(ValueIn);
2442 if (!needs_barrier(phase, value)) {
2443 return value;
2444 }
2445 return this;
2446 }
2447
// Entry point for the recursive analysis below; the visited set bounds
// the traversal on cyclic graphs (phis).
bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
  Unique_Node_List visited;
  return needs_barrier_impl(phase, n, visited);
}
2452
// Conservative analysis: could n be a reference that still needs a load
// reference barrier? Returns false only when the graph proves it cannot
// (fresh allocation, null, constant, call result, already-barriered
// value, ...). Unrecognized shapes trip ShouldNotReachHere() in debug
// builds and conservatively return true in product.
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
  if (n == nullptr) return false;
  if (visited.member(n)) {
    return false; // Been there.
  }
  visited.push(n);

  if (n->is_Allocate()) {
    // tty->print_cr("optimize barrier on alloc");
    return false;
  }
  if (n->is_Call()) {
    // tty->print_cr("optimize barrier on call");
    return false;
  }

  const Type* type = phase->type(n);
  if (type == Type::TOP) {
    return false;
  }
  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
    // tty->print_cr("optimize barrier on null");
    return false;
  }
  if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
    // tty->print_cr("optimize barrier on constant");
    return false;
  }

  switch (n->Opcode()) {
    case Op_AddP:
      return true; // TODO: Can refine?
    // Loads and atomic reference updates may return a stale reference.
    case Op_LoadP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
      return true;
    case Op_Phi: {
      // A phi needs a barrier if any of its inputs does.
      for (uint i = 1; i < n->req(); i++) {
        if (needs_barrier_impl(phase, n->in(i), visited)) return true;
      }
      return false;
    }
    // Casts and encode/decode are transparent: look through them.
    case Op_CheckCastPP:
    case Op_CastPP:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_Proj:
      return needs_barrier_impl(phase, n->in(0), visited);
    case Op_ShenandoahLoadReferenceBarrier:
      // tty->print_cr("optimize barrier on barrier");
      return false;
    case Op_Parm:
      // tty->print_cr("optimize barrier on input arg");
      return false;
    case Op_DecodeN:
    case Op_EncodeP:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_LoadN:
      return true;
    case Op_CMoveN:
    case Op_CMoveP:
      // Either selected value may need a barrier.
      return needs_barrier_impl(phase, n->in(2), visited) ||
             needs_barrier_impl(phase, n->in(3), visited);
    case Op_CreateEx:
      return false;
    default:
      break;
  }
#ifdef ASSERT
  tty->print("need barrier on?: ");
  tty->print_cr("ins:");
  n->dump(2);
  tty->print_cr("outs:");
  n->dump(-2);
  ShouldNotReachHere();
#endif
  return true;
}