1 /*
2 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
3 * Copyright (C) 2022, Tencent. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26
27 #include "classfile/javaClasses.hpp"
28 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
29 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
30 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
31 #include "gc/shenandoah/shenandoahForwarding.hpp"
32 #include "gc/shenandoah/shenandoahHeap.hpp"
33 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
34 #include "gc/shenandoah/shenandoahRuntime.hpp"
35 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
36 #include "opto/arraycopynode.hpp"
37 #include "opto/block.hpp"
38 #include "opto/callnode.hpp"
39 #include "opto/castnode.hpp"
40 #include "opto/movenode.hpp"
41 #include "opto/phaseX.hpp"
42 #include "opto/rootnode.hpp"
43 #include "opto/runtime.hpp"
44 #include "opto/subnode.hpp"
45
// Expand pending Shenandoah load-reference barriers, if any were recorded.
// Runs one extra round of PhaseIdealLoop (LoopOptsShenandoahExpand) even
// though the compile has already entered the post-loop-opts phase.
// Returns false if the compile failed along the way.
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if (state->load_reference_barriers_count() > 0) {
    // Loop opts are normally forbidden at this point; temporarily step back
    // out of the post-loop-opts phase so PhaseIdealLoop will run.
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    // Process nodes that were queued for IGVN after loop opts.
    C->process_for_post_loop_opts_igvn(igvn);
    if (C->failing()) return false;

    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}
61
62 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
63 if (!UseShenandoahGC) {
64 return false;
65 }
66 assert(iff->is_If(), "bad input");
67 if (iff->Opcode() != Op_If) {
68 return false;
69 }
70 Node* bol = iff->in(1);
71 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
72 return false;
73 }
74 Node* cmp = bol->in(1);
75 if (cmp->Opcode() != Op_CmpI) {
76 return false;
77 }
78 Node* in1 = cmp->in(1);
79 Node* in2 = cmp->in(2);
80 if (in2->find_int_con(-1) != 0) {
81 return false;
82 }
83 if (in1->Opcode() != Op_AndI) {
84 return false;
85 }
86 in2 = in1->in(2);
87 if (in2->find_int_con(-1) != mask) {
88 return false;
89 }
90 in1 = in1->in(1);
91
92 return is_gc_state_load(in1);
93 }
94
// A "heap stable" test is a gc-state test against the HAS_FORWARDED flag.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}
98
99 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
100 if (!UseShenandoahGC) {
101 return false;
102 }
103 if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
104 return false;
105 }
106 Node* addp = n->in(MemNode::Address);
107 if (!addp->is_AddP()) {
108 return false;
109 }
110 Node* base = addp->in(AddPNode::Address);
111 Node* off = addp->in(AddPNode::Offset);
112 if (base->Opcode() != Op_ThreadLocal) {
113 return false;
114 }
115 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
116 return false;
117 }
118 return true;
119 }
120
121 bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
122 assert(phase->is_dominator(stop, start), "bad inputs");
123 ResourceMark rm;
124 Unique_Node_List wq;
125 wq.push(start);
126 for (uint next = 0; next < wq.size(); next++) {
127 Node *m = wq.at(next);
128 if (m == stop) {
129 continue;
130 }
131 if (m->is_SafePoint() && !m->is_CallLeaf()) {
132 return true;
133 }
134 if (m->is_Region()) {
135 for (uint i = 1; i < m->req(); i++) {
136 wq.push(m->in(i));
137 }
138 } else {
139 wq.push(m->in(0));
140 }
141 }
142 return false;
143 }
144
145 #ifdef ASSERT
// Debug-only helper for verify(): chases an oop value 'in' backwards through
// casts, AddPs, phis and cmoves, checking that every reachable definition is
// an acceptable source for the given access kind 't' (constant, argument,
// allocation, Java call result, or a load-reference barrier). Uses an
// explicit stack ('phis') to iterate over all phi/cmove inputs without
// recursion; 'visited' prevents revisiting. Returns false when a value that
// needed a barrier doesn't have one (or when a barrier feeds an oop store,
// which is not allowed for t == ShenandoahOopStore).
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        // Casts are transparent: look through them.
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        // A barrier result must not feed the stored-oop side of an oop store.
        if (t == ShenandoahOopStore) {
          return false;
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        // First visit: remember the phi and descend into its first input;
        // remaining inputs are processed via the explicit stack below.
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        // CMove is handled like a two-input phi: follow IfFalse now,
        // IfTrue later via the stack.
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        // Compressed-oop conversions are transparent as well.
        in = in->in(1);
        continue;
      } else {
        // Unknown producer of an oop value: verification fails.
        return false;
      }
    }
    // Resume iteration over the inputs of the most recently pushed phi/cmove.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        // All inputs of this phi handled: pop and continue with its parent.
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}
238
239 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
240 if (n1 != nullptr) {
241 n1->dump(+10);
242 }
243 if (n2 != nullptr) {
244 n2->dump(+10);
245 }
246 fatal("%s", msg);
247 }
248
// Debug-only whole-graph verification: checks that oop loads, stores,
// compares, atomics (LoadStore) and known runtime/intrinsic calls all see
// properly barriered oop inputs, using verify_helper() on each relevant
// input. Calls report_verify_failure() (fatal) on any violation.
// NOTE(review): in this chunk 'wq' is only seeded with 'root' and no node
// inputs are pushed inside the loop, and 'barriers' is never populated —
// confirm against the full file that the traversal/bookkeeping code was not
// elided here.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  // Off by default: would flag barriers that no verified access ever used.
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        // Range/klass loads don't read through the oop and need no barrier.
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          // Mark-word loads are exempt.
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          // Reference.referent is handled specially by the GC.
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            // Raw store into the thread-local SATB queue buffer: part of the
            // pre-barrier itself, so the stored value needs no verification.
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        // Comparisons against constants or against freshly allocated objects
        // compare identities that barriers can't change, so no barrier needed.
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      // Atomic accesses: both the new value and the address need barriers.
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Table of known leaf calls and, for each, which oop arguments they
      // take and what kind of access they perform on them ({-1, None}
      // terminates the list of relevant arguments).
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "array_partition_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone }, { -1, ShenandoahNone }, { -1, ShenandoahNone } },
        "arraysort_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { -1, ShenandoahNone }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "intpoly_montgomeryMult_P256",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "intpoly_assign",
        { { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        // Arraycopy stubs: first oop argument is the source, second the dest.
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        // Array fill stubs write through their first argument.
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        // Look the call up in the table and verify each listed argument.
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          // Any oop argument not covered by the table entry is a bug in
          // the table itself.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown leaf call with oop arguments: the table must be extended.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Same idea as the leaf-call table above, but for intrinsic nodes
      // (up to two relevant oop inputs each).
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        // Oop inputs not covered by the table entry indicate a stale table.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Unknown node kind with oop inputs: the table must be extended.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
626 #endif
627
628 bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
629 Node* control) {
630 return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
631 phase->ctrl_or_self(maybe_load) == control;
632 }
633
634 void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
635 if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
636 return;
637 }
638 Node* mem = maybe_store->in(MemNode::Memory);
639 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
640 Node* u = mem->fast_out(i);
641 if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
642 wq.push(u);
643 }
644 }
645 }
646
647 void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
648 for (uint i = 0; i < n->req(); i++) {
649 Node* in = n->in(i);
650 if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
651 wq.push(in);
652 }
653 }
654 }
655
656 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
657 // That both nodes have the same control is not sufficient to prove
658 // domination, verify that there's no path from d to n
659 ResourceMark rm;
660 Unique_Node_List wq;
661 wq.push(d);
662 for (uint next = 0; next < wq.size(); next++) {
663 Node *m = wq.at(next);
664 if (m == n) {
665 return false;
666 }
667 if (m->is_Phi() && m->in(0)->is_Loop()) {
668 assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
669 } else {
670 // Take anti-dependencies into account
671 maybe_push_anti_dependent_loads(phase, m, c, wq);
672 push_data_inputs_at_control(phase, m, c, wq);
673 }
674 }
675 return true;
676 }
677
678 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
679 if (d_c != n_c) {
680 return phase->is_dominator(d_c, n_c);
681 }
682 return is_dominator_same_ctrl(d_c, d, n, phase);
683 }
684
685 Node* next_mem(Node* mem, int alias) {
686 Node* res = nullptr;
687 if (mem->is_Proj()) {
688 res = mem->in(0);
689 } else if (mem->is_SafePoint() || mem->is_MemBar()) {
690 res = mem->in(TypeFunc::Memory);
691 } else if (mem->is_Phi()) {
692 res = mem->in(1);
693 } else if (mem->is_MergeMem()) {
694 res = mem->as_MergeMem()->memory_at(alias);
695 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
696 assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
697 res = mem->in(MemNode::Memory);
698 } else {
699 #ifdef ASSERT
700 mem->dump();
701 #endif
702 ShouldNotReachHere();
703 }
704 return res;
705 }
706
// Walk the idom chain from c up to dom and check that control flow between
// them is "simple": no unsupported branching. Returns:
//   - nullptr      when no branching (other than uncommon traps) was found,
//   - a single If projection when allow_one_proj is true and exactly one
//     non-trap If projection lies on the path,
//   - NodeSentinel when an unsupported pattern is found (multiple
//     projections, Jump/Catch/NeverBranch projections, or a region whose
//     merged paths escape to CFG nodes outside the walked set).
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect all CFG nodes between the region and its idom ...
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      // ... then verify nothing in that set branches out of it (except to
      // uncommon traps).
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj()) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern() != nullptr) {
          // Uncommon-trap branches are tolerated.
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          // At most one non-trap If projection is allowed on the path.
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}
771
772 Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
773 ResourceMark rm;
774 VectorSet wq;
775 wq.set(mem->_idx);
776 mem_ctrl = phase->ctrl_or_self(mem);
777 while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
778 mem = next_mem(mem, alias);
779 if (wq.test_set(mem->_idx)) {
780 return nullptr;
781 }
782 mem_ctrl = phase->ctrl_or_self(mem);
783 }
784 if (mem->is_MergeMem()) {
785 mem = mem->as_MergeMem()->memory_at(alias);
786 mem_ctrl = phase->ctrl_or_self(mem);
787 }
788 return mem;
789 }
790
// Walk the idom chain up from 'ctrl' until a bottom-memory (TypePtr::BOTTOM)
// state is found: a memory Phi on a region, a call's memory projection, or
// the memory projection of a safepoint/membar/start node.
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      // Look for a bottom-memory Phi merging at this region.
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              // The call has both normal and exceptional memory projections;
              // pick the one on the path that dominates ctrl.
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        // Safepoints, membars and Start expose memory through a projection.
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == nullptr, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}
846
847 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
848 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
849 Node* u = n->fast_out(i);
850 if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
851 uses.push(u);
852 }
853 }
854 }
855
// Turn a strip-mined loop back into a plain loop: replace the outer
// strip-mined loop head with a regular LoopNode and its loop-end with a
// regular IfNode, then clear the inner loop's strip-mined flag.
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  // New plain loop head with the same entry and back edges.
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  // New plain If with the same condition, probability and frequency.
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->replace_node_and_forward_ctrl(outer, new_outer);
  phase->replace_node_and_forward_ctrl(le, new_le);
  inner->clear_strip_mined();
}
866
// Emit a runtime test of the thread-local gc-state byte against the given
// flags. On return:
//  - ctrl is the path taken when (gc_state & flags) != 0, i.e. the barrier
//    must do more work;
//  - test_fail_ctrl is the path taken when none of the flags are set.
// The gc-state byte is loaded from the current thread via raw memory.
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                 DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                 TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

  // The flags are expected to be clear most of the time (heap stable).
  IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(gc_state_iff);
  test_fail_ctrl = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread, old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state, old_ctrl);
  phase->register_new_node(gc_state_and, old_ctrl);
  phase->register_new_node(gc_state_cmp, old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  // The offset is a constant: not control dependent on anything.
  phase->set_root_as_ctrl(gc_state_offset);

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}
902
// Emit a null check for val when its type may include null. On return,
// ctrl is the non-null path and null_ctrl the null path. If val is
// provably non-null, no test is emitted and null_ctrl is left untouched.
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  // Only test when null is a possible value of val.
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);

    // Null values are expected to be rare.
    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl = new IfTrueNode(null_iff);
    null_ctrl = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff, loop, old_ctrl);
    phase->register_control(ctrl, loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp, old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}
925
// Emit a test of whether val points into the collection set. On return,
// ctrl is the in-cset path (evacuation needed) and not_cset_ctrl the path
// where val is outside the cset. Membership is read as a byte from the
// in_cset_fast_test table, indexed by val's region number.
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  // Region index = pointer >> log2(region size in bytes).
  Node* raw_val = new CastP2XNode(old_ctrl, val);
  Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

  Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

  // Being in the collection set is the unlikely case.
  IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(cset_iff);
  not_cset_ctrl = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  // The table address is a constant: not control dependent on anything.
  phase->set_root_as_ctrl(cset_addr_ptr);

  phase->register_new_node(raw_val, old_ctrl);
  phase->register_new_node(cset_idx, old_ctrl);
  phase->register_new_node(cset_addr, old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr, old_ctrl);
  phase->register_new_node(cset_load, old_ctrl);
  phase->register_new_node(cset_cmp, old_ctrl);
  phase->register_new_node(cset_bool, old_ctrl);
}
967
// Emit the load-reference-barrier runtime stub call for the evacuation
// path. The stub variant is selected from the access strength
// (strong/weak/phantom) and whether compressed oops are in use (native
// accesses always take the non-narrow variant). On return, ctrl is the
// control projection of the call and val its result, cast back to the
// original value's type.
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
                                               DecoratorSet decorators, PhaseIdealLoop* phase) {
  IdealLoopTree*loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  address calladdr = nullptr;
  const char* name = nullptr;
  bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow = UseCompressedOops && !is_native;
  if (is_strong) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      name = "load_reference_barrier_strong_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      name = "load_reference_barrier_strong";
    }
  } else if (is_weak) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      name = "load_reference_barrier_weak_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      name = "load_reference_barrier_weak";
    }
  } else {
    assert(is_phantom, "only remaining strength");
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      name = "load_reference_barrier_phantom_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      name = "load_reference_barrier_phantom";
    }
  }
  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);

  // Leaf call: no IO, memory, frame pointer or return address state needed.
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, phase->C->top());
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  // Cast the returned oop back to the caller-visible type so downstream
  // type information is preserved.
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}
1023
// Compute the transitive set of nodes pinned at ctrl that must stay above
// the expanded barrier. The set is seeded with the barrier's input raw
// memory state (when it is pinned at ctrl and not a phi) and grown by
// following anti-dependent loads and data inputs at the same control.
void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
  nodes_above_barrier.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    nodes_above_barrier.push(init_raw_mem);
  }
  // Worklist expansion: the list grows while we iterate over it.
  for (uint next = 0; next < nodes_above_barrier.size(); next++) {
    Node* n = nodes_above_barrier.at(next);
    // Take anti-dependencies into account
    maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
    push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
  }
}
1036
// After a barrier at ctrl has been expanded into region, move every node
// that was control dependent on ctrl and should execute after the barrier
// to depend on region instead. Nodes in nodes_above_barrier stay put; data
// nodes that moved are pushed on uses for the caller to process further.
// Only nodes created before expansion (idx < last) are considered.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !u->depends_only_on_test() && // preserve dependency on test
        !nodes_above_barrier.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      if (old_c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          // A CFG node moved: keep the dominator tree consistent.
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        // Edge replacement compacted the out array: re-check this slot.
        --i, imax -= nb;
      }
    }
  }
}
1077
// Walk up the dominator tree from c to ctrl, remember the last region seen,
// and build a phi at that region merging n (on region inputs dominated by
// the call's fallthrough catch projection) and n_clone (on inputs dominated
// by the catch-all projection), recursing for inputs dominated by neither.
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = nullptr;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != nullptr, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}
1101
1102 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1103 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1104
1105 Unique_Node_List uses;
1106 Node_Stack stack(0);
1107 Node_List clones;
1108 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1109 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1110
1111 Node* ctrl = phase->get_ctrl(lrb);
1112 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1113
1114 CallStaticJavaNode* unc = nullptr;
1115 Node* unc_ctrl = nullptr;
1116 Node* uncasted_val = val;
1117
1118 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1119 Node* u = lrb->fast_out(i);
1120 if (u->Opcode() == Op_CastPP &&
1121 u->in(0) != nullptr &&
1122 phase->is_dominator(u->in(0), ctrl)) {
1123 const Type* u_t = phase->igvn().type(u);
1124
1125 if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1126 u->in(0)->Opcode() == Op_IfTrue &&
1127 u->in(0)->as_Proj()->is_uncommon_trap_if_pattern() &&
1128 u->in(0)->in(0)->is_If() &&
1129 u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1130 u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1131 u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1132 u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1133 u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1134 IdealLoopTree* loop = phase->get_loop(ctrl);
1135 IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1136
1137 if (!unc_loop->is_member(loop)) {
1138 continue;
1139 }
1140
1141 Node* branch = no_branches(ctrl, u->in(0), false, phase);
1142 assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1143 if (branch == NodeSentinel) {
1144 continue;
1145 }
1146
1147 Node* iff = u->in(0)->in(0);
1148 Node* bol = iff->in(1)->clone();
1149 Node* cmp = bol->in(1)->clone();
1150 cmp->set_req(1, lrb);
1151 bol->set_req(1, cmp);
1152 phase->igvn().replace_input_of(iff, 1, bol);
1153 phase->set_ctrl(lrb, iff->in(0));
1154 phase->register_new_node(cmp, iff->in(0));
1155 phase->register_new_node(bol, iff->in(0));
1156 break;
1157 }
1158 }
1159 }
1160 // Load barrier on the control output of a call
1161 if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1162 CallJavaNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1163 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1164 // The rethrow call may have too many projections to be
1165 // properly handled here. Given there's no reason for a
1166 // barrier to depend on the call, move it above the call
1167 stack.push(lrb, 0);
1168 do {
1169 Node* n = stack.node();
1170 uint idx = stack.index();
1171 if (idx < n->req()) {
1172 Node* in = n->in(idx);
1173 stack.set_index(idx+1);
1174 if (in != nullptr) {
1175 if (phase->has_ctrl(in)) {
1176 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1177 #ifdef ASSERT
1178 for (uint i = 0; i < stack.size(); i++) {
1179 assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1180 }
1181 #endif
1182 stack.push(in, 0);
1183 }
1184 } else {
1185 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1186 }
1187 }
1188 } else {
1189 phase->set_ctrl(n, call->in(0));
1190 stack.pop();
1191 }
1192 } while(stack.size() > 0);
1193 continue;
1194 }
1195 CallProjections projs;
1196 call->extract_projections(&projs, false, false);
1197
1198 // If this is a runtime call, it doesn't have an exception handling path
1199 if (projs.fallthrough_catchproj == nullptr) {
1200 assert(call->method() == nullptr, "should be runtime call");
1201 assert(projs.catchall_catchproj == nullptr, "runtime call should not have catch all projection");
1202 continue;
1203 }
1204
1205 // Otherwise, clone the barrier so there's one for the fallthrough and one for the exception handling path
1206 #ifdef ASSERT
1207 VectorSet cloned;
1208 #endif
1209 Node* lrb_clone = lrb->clone();
1210 phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1211 phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1212
1213 stack.push(lrb, 0);
1214 clones.push(lrb_clone);
1215
1216 do {
1217 assert(stack.size() == clones.size(), "");
1218 Node* n = stack.node();
1219 #ifdef ASSERT
1220 if (n->is_Load()) {
1221 Node* mem = n->in(MemNode::Memory);
1222 for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1223 Node* u = mem->fast_out(j);
1224 assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1225 }
1226 }
1227 #endif
1228 uint idx = stack.index();
1229 Node* n_clone = clones.at(clones.size()-1);
1230 if (idx < n->outcnt()) {
1231 Node* u = n->raw_out(idx);
1232 Node* c = phase->ctrl_or_self(u);
1233 if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1234 stack.set_index(idx+1);
1235 assert(!u->is_CFG(), "");
1236 stack.push(u, 0);
1237 assert(!cloned.test_set(u->_idx), "only one clone");
1238 Node* u_clone = u->clone();
1239 int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1240 assert(nb > 0, "should have replaced some uses");
1241 phase->register_new_node(u_clone, projs.catchall_catchproj);
1242 clones.push(u_clone);
1243 phase->set_ctrl(u, projs.fallthrough_catchproj);
1244 } else {
1245 bool replaced = false;
1246 if (u->is_Phi()) {
1247 for (uint k = 1; k < u->req(); k++) {
1248 if (u->in(k) == n) {
1249 if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1250 phase->igvn().replace_input_of(u, k, n_clone);
1251 replaced = true;
1252 } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1253 phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1254 replaced = true;
1255 }
1256 }
1257 }
1258 } else {
1259 if (phase->is_dominator(projs.catchall_catchproj, c)) {
1260 phase->igvn().rehash_node_delayed(u);
1261 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1262 assert(nb > 0, "should have replaced some uses");
1263 replaced = true;
1264 } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1265 if (u->is_If()) {
1266 // Can't break If/Bool/Cmp chain
1267 assert(n->is_Bool(), "unexpected If shape");
1268 assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1269 assert(n_clone->is_Bool(), "unexpected clone");
1270 assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1271 Node* bol_clone = n->clone();
1272 Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1273 bol_clone->set_req(1, cmp_clone);
1274
1275 Node* nn = stack.node_at(stack.size()-3);
1276 Node* nn_clone = clones.at(clones.size()-3);
1277 assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1278
1279 int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1280 &phase->igvn());
1281 assert(nb > 0, "should have replaced some uses");
1282
1283 phase->register_new_node(bol_clone, u->in(0));
1284 phase->register_new_node(cmp_clone, u->in(0));
1285
1286 phase->igvn().replace_input_of(u, 1, bol_clone);
1287
1288 } else {
1289 phase->igvn().rehash_node_delayed(u);
1290 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1291 assert(nb > 0, "should have replaced some uses");
1292 }
1293 replaced = true;
1294 }
1295 }
1296 if (!replaced) {
1297 stack.set_index(idx+1);
1298 }
1299 }
1300 } else {
1301 stack.pop();
1302 clones.pop();
1303 }
1304 } while (stack.size() > 0);
1305 assert(stack.size() == 0 && clones.size() == 0, "");
1306 }
1307 }
1308
1309 for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1310 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1311 Node* ctrl = phase->get_ctrl(lrb);
1312 IdealLoopTree* loop = phase->get_loop(ctrl);
1313 Node* head = loop->head();
1314 if (head->is_OuterStripMinedLoop()) {
1315 // Expanding a barrier here will break loop strip mining
1316 // verification. Transform the loop so the loop nest doesn't
1317 // appear as strip mined.
1318 OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1319 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1320 }
1321 if (head->is_BaseCountedLoop() && ctrl->is_IfProj() && ctrl->in(0)->is_BaseCountedLoopEnd() &&
1322 head->as_BaseCountedLoop()->loopexit() == ctrl->in(0)) {
1323 Node* entry = head->in(LoopNode::EntryControl);
1324 Node* backedge = head->in(LoopNode::LoopBackControl);
1325 Node* new_head = new LoopNode(entry, backedge);
1326 phase->register_control(new_head, phase->get_loop(entry), entry);
1327 phase->replace_node_and_forward_ctrl(head, new_head);
1328 }
1329 }
1330
1331 // Expand load-reference-barriers
1332 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1333 Unique_Node_List nodes_above_barriers;
1334 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1335 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1336 uint last = phase->C->unique();
1337 Node* ctrl = phase->get_ctrl(lrb);
1338 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1339
1340 Node* orig_ctrl = ctrl;
1341
1342 Node* raw_mem = fixer.find_mem(ctrl, lrb);
1343 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1344
1345 IdealLoopTree *loop = phase->get_loop(ctrl);
1346
1347 Node* heap_stable_ctrl = nullptr;
1348 Node* null_ctrl = nullptr;
1349
1350 assert(val->bottom_type()->make_oopptr(), "need oop");
1351 assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1352
1353 enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1354 Node* region = new RegionNode(PATH_LIMIT);
1355 Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1356
1357 // Stable path.
1358 int flags = ShenandoahHeap::HAS_FORWARDED;
1359 if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1360 flags |= ShenandoahHeap::WEAK_ROOTS;
1361 }
1362 test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1363 IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1364
1365 // Heap stable case
1366 region->init_req(_heap_stable, heap_stable_ctrl);
1367 val_phi->init_req(_heap_stable, val);
1368
1369 // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1370 // even for non-cset objects to prevent resurrection of such objects.
1371 // Wires !in_cset(obj) to slot 2 of region and phis
1372 Node* not_cset_ctrl = nullptr;
1373 if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1374 test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1375 }
1376 if (not_cset_ctrl != nullptr) {
1377 region->init_req(_not_cset, not_cset_ctrl);
1378 val_phi->init_req(_not_cset, val);
1379 } else {
1380 region->del_req(_not_cset);
1381 val_phi->del_req(_not_cset);
1382 }
1383
1384 // Resolve object when orig-value is in cset.
1385 // Make the unconditional resolve for fwdptr.
1386
1387 // Call lrb-stub and wire up that path in slots 4
1388 Node* result_mem = nullptr;
1389
1390 Node* addr;
1391 {
1392 VectorSet visited;
1393 addr = get_load_addr(phase, visited, lrb);
1394 }
1395 if (addr->Opcode() == Op_AddP) {
1396 Node* orig_base = addr->in(AddPNode::Base);
1397 Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1398 phase->register_new_node(base, ctrl);
1399 if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
1400 // Field access
1401 addr = addr->clone();
1402 addr->set_req(AddPNode::Base, base);
1403 addr->set_req(AddPNode::Address, base);
1404 phase->register_new_node(addr, ctrl);
1405 } else {
1406 Node* addr2 = addr->in(AddPNode::Address);
1407 if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1408 addr2->in(AddPNode::Base) == orig_base) {
1409 addr2 = addr2->clone();
1410 addr2->set_req(AddPNode::Base, base);
1411 addr2->set_req(AddPNode::Address, base);
1412 phase->register_new_node(addr2, ctrl);
1413 addr = addr->clone();
1414 addr->set_req(AddPNode::Base, base);
1415 addr->set_req(AddPNode::Address, addr2);
1416 phase->register_new_node(addr, ctrl);
1417 }
1418 }
1419 }
1420 call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1421 region->init_req(_evac_path, ctrl);
1422 val_phi->init_req(_evac_path, val);
1423
1424 phase->register_control(region, loop, heap_stable_iff);
1425 Node* out_val = val_phi;
1426 phase->register_new_node(val_phi, region);
1427
1428 fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
1429
1430 ctrl = orig_ctrl;
1431
1432 phase->igvn().replace_node(lrb, out_val);
1433
1434 follow_barrier_uses(out_val, ctrl, uses, phase);
1435
1436 for(uint next = 0; next < uses.size(); next++ ) {
1437 Node *n = uses.at(next);
1438 assert(phase->get_ctrl(n) == ctrl, "bad control");
1439 assert(n != raw_mem, "should leave input raw mem above the barrier");
1440 phase->set_ctrl(n, region);
1441 follow_barrier_uses(n, ctrl, uses, phase);
1442 }
1443 fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1444 }
1445 // Done expanding load-reference-barriers.
1446 assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1447 }
1448
// Walk backwards from the value feeding a load reference barrier to find
// the address the value was loaded from. Returns null when revisiting a
// node (cycle through visited), and a null constant whenever no single
// load address can be determined (calls, constants, atomics, ambiguous
// phis/cmoves).
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    // Already on this walk: cycle in the graph.
    return nullptr;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      // Casts and narrow-oop conversions are transparent: look through.
      return get_load_addr(phase, visited, in->in(1));
    case Op_LoadN:
    case Op_LoadP:
      return in->in(MemNode::Address);
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      // Those instructions would just have stored a different
      // value into the field. No use to attempt to fix it at this point.
      return phase->igvn().zerocon(T_OBJECT);
    case Op_CMoveP:
    case Op_CMoveN: {
      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
      // Handle unambiguous cases: single address reported on both branches.
      if (t != nullptr && f == nullptr) return t;
      if (t == nullptr && f != nullptr) return f;
      if (t != nullptr && t == f) return t;
      // Ambiguity.
      return phase->igvn().zerocon(T_OBJECT);
    }
    case Op_Phi: {
      // All phi inputs must agree on a single address.
      Node* addr = nullptr;
      for (uint i = 1; i < in->req(); i++) {
        Node* addr1 = get_load_addr(phase, visited, in->in(i));
        if (addr == nullptr) {
          addr = addr1;
        }
        if (addr != addr1) {
          return phase->igvn().zerocon(T_OBJECT);
        }
      }
      return addr;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
    case Op_CallDynamicJava:
    case Op_CallLeaf:
    case Op_CallStaticJava:
    case Op_ConN:
    case Op_ConP:
    case Op_Parm:
    case Op_CreateEx:
      // Not loads: there is no load address to recover.
      return phase->igvn().zerocon(T_OBJECT);
    default:
#ifdef ASSERT
      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
#endif
      return phase->igvn().zerocon(T_OBJECT);
  }

}
1515
1516 #ifdef ASSERT
1517 static bool has_never_branch(Node* root) {
1518 for (uint i = 1; i < root->req(); i++) {
1519 Node* in = root->in(i);
1520 if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
1521 return true;
1522 }
1523 }
1524 return false;
1525 }
1526 #endif
1527
1528 void MemoryGraphFixer::collect_memory_nodes() {
1529 Node_Stack stack(0);
1530 VectorSet visited;
1531 Node_List regions;
1532
1533 // Walk the raw memory graph and create a mapping from CFG node to
1534 // memory node. Exclude phis for now.
1535 stack.push(_phase->C->root(), 1);
1536 do {
1537 Node* n = stack.node();
1538 int opc = n->Opcode();
1539 uint i = stack.index();
1540 if (i < n->req()) {
1541 Node* mem = nullptr;
1542 if (opc == Op_Root) {
1543 Node* in = n->in(i);
1544 int in_opc = in->Opcode();
1545 if (in_opc == Op_Return || in_opc == Op_Rethrow) {
1546 mem = in->in(TypeFunc::Memory);
1547 } else if (in_opc == Op_Halt) {
1548 if (in->in(0)->is_Region()) {
1549 Node* r = in->in(0);
1550 for (uint j = 1; j < r->req(); j++) {
1551 assert(!r->in(j)->is_NeverBranch(), "");
1552 }
1553 } else {
1554 Node* proj = in->in(0);
1555 assert(proj->is_Proj(), "");
1556 Node* in = proj->in(0);
1557 assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
1558 if (in->is_CallStaticJava()) {
1559 mem = in->in(TypeFunc::Memory);
1560 } else if (in->Opcode() == Op_Catch) {
1561 Node* call = in->in(0)->in(0);
1562 assert(call->is_Call(), "");
1563 mem = call->in(TypeFunc::Memory);
1564 } else if (in->is_NeverBranch()) {
1565 mem = collect_memory_for_infinite_loop(in);
1566 }
1567 }
1568 } else {
1569 #ifdef ASSERT
1570 n->dump();
1571 in->dump();
1572 #endif
1573 ShouldNotReachHere();
1574 }
1575 } else {
1576 assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
1577 assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
1578 mem = n->in(i);
1579 }
1580 i++;
1581 stack.set_index(i);
1582 if (mem == nullptr) {
1583 continue;
1584 }
1585 for (;;) {
1586 if (visited.test_set(mem->_idx) || mem->is_Start()) {
1587 break;
1588 }
1589 if (mem->is_Phi()) {
1590 stack.push(mem, 2);
1591 mem = mem->in(1);
1592 } else if (mem->is_Proj()) {
1593 stack.push(mem, mem->req());
1594 mem = mem->in(0);
1595 } else if (mem->is_SafePoint() || mem->is_MemBar()) {
1596 mem = mem->in(TypeFunc::Memory);
1597 } else if (mem->is_MergeMem()) {
1598 MergeMemNode* mm = mem->as_MergeMem();
1599 mem = mm->memory_at(_alias);
1600 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1601 assert(_alias == Compile::AliasIdxRaw, "");
1602 stack.push(mem, mem->req());
1603 mem = mem->in(MemNode::Memory);
1604 } else {
1605 #ifdef ASSERT
1606 mem->dump();
1607 #endif
1608 ShouldNotReachHere();
1609 }
1610 }
1611 } else {
1612 if (n->is_Phi()) {
1613 // Nothing
1614 } else if (!n->is_Root()) {
1615 Node* c = get_ctrl(n);
1616 _memory_nodes.map(c->_idx, n);
1617 }
1618 stack.pop();
1619 }
1620 } while(stack.is_nonempty());
1621
1622 // Iterate over CFG nodes in rpo and propagate memory state to
1623 // compute memory state at regions, creating new phis if needed.
1624 Node_List rpo_list;
1625 visited.clear();
1626 _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
1627 Node* root = rpo_list.pop();
1628 assert(root == _phase->C->root(), "");
1629
1630 const bool trace = false;
1631 #ifdef ASSERT
1632 if (trace) {
1633 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1634 Node* c = rpo_list.at(i);
1635 if (_memory_nodes[c->_idx] != nullptr) {
1636 tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
1637 }
1638 }
1639 }
1640 #endif
1641 uint last = _phase->C->unique();
1642
1643 #ifdef ASSERT
1644 uint16_t max_depth = 0;
1645 for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
1646 IdealLoopTree* lpt = iter.current();
1647 max_depth = MAX2(max_depth, lpt->_nest);
1648 }
1649 #endif
1650
1651 bool progress = true;
1652 int iteration = 0;
1653 Node_List dead_phis;
1654 while (progress) {
1655 progress = false;
1656 iteration++;
1657 assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1658 if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
1659
1660 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1661 Node* c = rpo_list.at(i);
1662
1663 Node* prev_mem = _memory_nodes[c->_idx];
1664 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1665 Node* prev_region = regions[c->_idx];
1666 Node* unique = nullptr;
1667 for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
1668 Node* m = _memory_nodes[c->in(j)->_idx];
1669 assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
1670 if (m != nullptr) {
1671 if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
1672 assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1673 // continue
1674 } else if (unique == nullptr) {
1675 unique = m;
1676 } else if (m == unique) {
1677 // continue
1678 } else {
1679 unique = NodeSentinel;
1680 }
1681 }
1682 }
1683 assert(unique != nullptr, "empty phi???");
1684 if (unique != NodeSentinel) {
1685 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
1686 dead_phis.push(prev_region);
1687 }
1688 regions.map(c->_idx, unique);
1689 } else {
1690 Node* phi = nullptr;
1691 if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
1692 phi = prev_region;
1693 for (uint k = 1; k < c->req(); k++) {
1694 Node* m = _memory_nodes[c->in(k)->_idx];
1695 assert(m != nullptr, "expect memory state");
1696 phi->set_req(k, m);
1697 }
1698 } else {
1699 for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
1700 Node* u = c->fast_out(j);
1701 if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1702 (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
1703 phi = u;
1704 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
1705 Node* m = _memory_nodes[c->in(k)->_idx];
1706 assert(m != nullptr, "expect memory state");
1707 if (u->in(k) != m) {
1708 phi = NodeSentinel;
1709 }
1710 }
1711 }
1712 }
1713 if (phi == NodeSentinel) {
1714 phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
1715 for (uint k = 1; k < c->req(); k++) {
1716 Node* m = _memory_nodes[c->in(k)->_idx];
1717 assert(m != nullptr, "expect memory state");
1718 phi->init_req(k, m);
1719 }
1720 }
1721 }
1722 if (phi != nullptr) {
1723 regions.map(c->_idx, phi);
1724 } else {
1725 assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1726 }
1727 }
1728 Node* current_region = regions[c->_idx];
1729 if (current_region != prev_region) {
1730 progress = true;
1731 if (prev_region == prev_mem) {
1732 _memory_nodes.map(c->_idx, current_region);
1733 }
1734 }
1735 } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
1736 Node* m = _memory_nodes[_phase->idom(c)->_idx];
1737 assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
1738 if (m != prev_mem) {
1739 _memory_nodes.map(c->_idx, m);
1740 progress = true;
1741 }
1742 }
1743 #ifdef ASSERT
1744 if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
1745 #endif
1746 }
1747 }
1748
1749 // Replace existing phi with computed memory state for that region
1750 // if different (could be a new phi or a dominating memory node if
1751 // that phi was found to be useless).
1752 while (dead_phis.size() > 0) {
1753 Node* n = dead_phis.pop();
1754 n->replace_by(_phase->C->top());
1755 n->destruct(&_phase->igvn());
1756 }
1757 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1758 Node* c = rpo_list.at(i);
1759 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1760 Node* n = regions[c->_idx];
1761 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1762 if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
1763 _phase->register_new_node(n, c);
1764 }
1765 }
1766 }
1767 for (int i = rpo_list.size() - 1; i >= 0; i--) {
1768 Node* c = rpo_list.at(i);
1769 if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1770 Node* n = regions[c->_idx];
1771 assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1772 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
1773 Node* u = c->fast_out(i);
1774 if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1775 u != n) {
1776 assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
1777 if (u->adr_type() == TypePtr::BOTTOM) {
1778 fix_memory_uses(u, n, n, c);
1779 } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1780 _phase->igvn().replace_node(u, n);
1781 --i; --imax;
1782 }
1783 }
1784 }
1785 }
1786 }
1787 }
1788
// Compute the memory state (for slice _alias) that feeds an infinite loop:
// in is an input to the loop head region in->in(0). Prefer an existing
// memory Phi on the loop head; otherwise walk control backward from the
// head's inputs to a SafePoint and take its memory input.
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
  Node* mem = nullptr;
  Node* head = in->in(0);
  assert(head->is_Region(), "unexpected infinite loop graph shape");

  // Look for a memory Phi on the loop head. A Phi on exactly our alias is
  // preferred over a wide (TypePtr::BOTTOM) one; the asserts check that at
  // most one of each kind is seen.
  Node* phi_mem = nullptr;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* u = head->fast_out(j);
    if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
      if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
        phi_mem = u;
      } else if (u->adr_type() == TypePtr::BOTTOM) {
        assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
        if (phi_mem == nullptr) {
          phi_mem = u;
        }
      }
    }
  }
  if (phi_mem == nullptr) {
    // No Phi found: depth-first walk over the head's control inputs until a
    // (non-leaf-call) SafePoint is found; its memory input is the state we
    // want. All paths must agree on a single state (asserted below).
    ResourceMark rm;
    Node_Stack stack(0);
    stack.push(head, 1);
    do {
      Node* n = stack.node();
      uint i = stack.index();
      if (i >= n->req()) {
        stack.pop();
      } else {
        stack.set_index(i + 1);
        Node* c = n->in(i);
        assert(c != head, "should have found a safepoint on the way");
        // Only follow paths that stay inside the loop (dominated by head),
        // except for the head's own inputs (stack.size() == 1).
        if (stack.size() != 1 || _phase->is_dominator(head, c)) {
          for (;;) {
            if (c->is_Region()) {
              stack.push(c, 1);
              break;
            } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
              Node* m = c->in(TypeFunc::Memory);
              if (m->is_MergeMem()) {
                m = m->as_MergeMem()->memory_at(_alias);
              }
              assert(mem == nullptr || mem == m, "several memory states");
              mem = m;
              break;
            } else {
              assert(c != c->in(0), "");
              c = c->in(0);
            }
          }
        }
      }
    } while (stack.size() > 0);
    assert(mem != nullptr, "should have found safepoint");
  } else {
    mem = phi_mem;
  }
  return mem;
}
1849
1850 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
1851 Node* c = _phase->get_ctrl(n);
1852 if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
1853 assert(c == n->in(0), "");
1854 CallNode* call = c->as_Call();
1855 CallProjections projs;
1856 call->extract_projections(&projs, true, false);
1857 if (projs.catchall_memproj != nullptr) {
1858 if (projs.fallthrough_memproj == n) {
1859 c = projs.fallthrough_catchproj;
1860 } else {
1861 assert(projs.catchall_memproj == n, "");
1862 c = projs.catchall_catchproj;
1863 }
1864 }
1865 }
1866 return c;
1867 }
1868
1869 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
1870 if (_phase->has_ctrl(n))
1871 return get_ctrl(n);
1872 else {
1873 assert (n->is_CFG(), "must be a CFG node");
1874 return n;
1875 }
1876 }
1877
1878 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
1879 return m != nullptr && get_ctrl(m) == c;
1880 }
1881
// Return the memory state (for slice _alias) live at control ctrl, walking
// up the dominator tree until a valid state is found. If n is non-null it
// is a memory user at ctrl: the returned state must then also reach n.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
  assert(!ctrl->is_Call() || ctrl == n, "projection expected");
#ifdef ASSERT
  // Sanity check: when ctrl sits right after a call, that call must have at
  // most one memory projection, otherwise the idom walk below could pick up
  // the state from the wrong projection.
  if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
      (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
    CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
    int mems = 0;
    for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
      Node* u = call->fast_out(i);
      if (u->bottom_type() == Type::MEMORY) {
        mems++;
      }
    }
    assert(mems <= 1, "No node right after call if multiple mem projections");
  }
#endif
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  // Walk up the dominator tree until the recorded state is pinned at the
  // control we are looking at. A CatchProj whose call produced mem is
  // accepted as well (second half of the condition).
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != nullptr && mem_is_valid(mem, c)) {
    // Several memory nodes may share the same control: step down the memory
    // chain until we find the state that dominates n.
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // The chain walk left the current control: resume the dominator walk.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}
1924
1925 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
1926 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1927 Node* use = region->fast_out(i);
1928 if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
1929 (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
1930 return true;
1931 }
1932 }
1933 return false;
1934 }
1935
// Update the memory graph (slice _alias) after new_mem has been inserted at
// new_ctrl: mem is the memory state that was current at ctrl, mem_for_ctrl
// the state reaching ctrl's users. Creates memory Phis where control paths
// merge and rewires loads, stores, MergeMems and Phis to the right state.
// uses is scratch storage supplied by the caller.
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // new_mem was inserted below a chain of raw memory operations between
    // mem and mem_for_ctrl: walk the chain back from mem_for_ctrl to mem and
    // hook the oldest node of the chain (prev) to new_mem.
    Node* old = mem_for_ctrl;
    Node* prev = nullptr;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != nullptr, "");
    if (new_ctrl != ctrl) {
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    uint input = (uint)MemNode::Memory;
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // Walk the CFG forward from new_ctrl; at every region where the new
    // memory state merges with the old one, create a new memory Phi (its
    // inputs are filled in after the traversal, below).
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                // Every path into u sees the new state: no Phi needed.
                create_phi = false;
              }
              if (create_phi) {
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  // A memory state is already pinned at u: walk to the
                  // oldest memory node at u and feed it the new Phi.
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
                    Node* next = nullptr;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else {
                      assert(m->is_Mem() || m->is_LoadStore(), "");
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore(), "");
                  uint input = (uint)MemNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
            uses.push(u);
          }
        }
      }
    }
    // Now that the traversal is done, fill in each new Phi's inputs with the
    // memory state found at the matching region predecessor.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), nullptr);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  // Rewire the uses of mem that existed before this pass (u->_idx < last)
  // to the memory state computed for their control.
  uint last = _phase->C->unique();
  MergeMemNode* mm = nullptr;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  // Process loads first to not miss an anti-dependency: if the memory
  // edge of a store is updated before a load is processed then an
  // anti-dependency may be missed.
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
      Node* m = find_mem(_phase->get_ctrl(u), u);
      if (m != mem) {
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, MemNode::Memory, m);
        --i;
      }
    }
  }
  // Then every other kind of use: stores, MergeMems, Phis, wide-memory
  // users (calls, safepoints) and same-alias narrow users.
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i;
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          // Clone the MergeMem per use, because its different uses may need
          // different memory states for our slice.
          MergeMemNode* newmm = nullptr;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              assert(uu->adr_type() == TypePtr::BOTTOM, "");
              Node* region = uu->in(0);
              int nb = 0;
              for (uint k = 1; k < uu->req(); k++) {
                if (uu->in(k) == u) {
                  Node* m = find_mem(region->in(k), nullptr);
                  if (m != mem) {
                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
              }
              if (nb > 0) {
                --j;
              }
            } else {
              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
              if (m != mem) {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
          Node* region = u->in(0);
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem) {
              Node* m = find_mem(region->in(j), nullptr);
              Node* nnew = m;
              if (m != mem) {
                if (u->adr_type() == TypePtr::BOTTOM) {
                  // Wide Phi input: wrap the narrow state in a MergeMem.
                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
                  nnew = mm;
                }
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
                _phase->igvn().replace_input_of(u, j, nnew);
                replaced = true;
              }
            }
          }
          if (replaced) {
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        // Wide-memory users (returns, safepoints, some calls): they need a
        // MergeMem carrying the new state on our slice.
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
          --i;
        }
      } else if (u->adr_type() != TypePtr::BOTTOM &&
                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        assert(m != mem, "");
        // u is on the wrong slice...
        assert(u->is_ClearArray(), "");
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
        --i;
      }
    }
  }
#ifdef ASSERT
  assert(new_mem->outcnt() > 0, "");
  for (int i = 0; i < phis.length(); i++) {
    Node* n = phis.at(i);
    assert(n->outcnt() > 0, "new phi must have uses now");
  }
#endif
}
2190
2191 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2192 if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2193 _memory_nodes.map(ctrl->_idx, mem);
2194 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2195 }
2196 }
2197
2198 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2199 MergeMemNode* mm = MergeMemNode::make(mem);
2200 mm->set_memory_at(_alias, rep_proj);
2201 _phase->register_new_node(mm, rep_ctrl);
2202 return mm;
2203 }
2204
// Return a MergeMem like u but with rep_proj as the state for slice _alias:
// u is updated in place when it has a single use, otherwise a fresh clone is
// built. i is the caller's DUIterator over mem's uses and is adjusted when
// an edge from u to mem is removed.
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = nullptr;
  MergeMemNode* u_mm = u->as_MergeMem();
  // Place the result at the deeper of u's control and rep_ctrl.
  Node* c = _phase->get_ctrl(u);
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: reuse u instead of cloning it.
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    if ((uint)_alias >= u->req()) {
      // u had no explicit edge for our slice: set it on the clone now.
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}
2249
2250 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2251 if (phi->adr_type() == TypePtr::BOTTOM) {
2252 Node* region = phi->in(0);
2253 for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2254 Node* uu = region->fast_out(j);
2255 if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2256 return false;
2257 }
2258 }
2259 return true;
2260 }
2261 return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2262 }
2263
// Rewire the preexisting uses of memory state mem (slice _alias) that are
// dominated by rep_ctrl to use rep_proj instead; wide-memory users get a
// MergeMem wrapper. replacement itself is skipped.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase-> C->unique();
  MergeMemNode* mm = nullptr;
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    // Only process uses that existed before this pass started.
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = nullptr;
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                // Redirect only the Phi inputs whose control is dominated
                // by rep_ctrl.
                for (uint k = 1; k < uu->req(); k++) {
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == nullptr) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == nullptr) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // Wide Phi input: wrap rep_proj in a (lazily allocated,
                // shared) MergeMem.
                if (mm == nullptr) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            --i;
          }

        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        // Wide-memory users (returns, safepoints, some calls) consume all
        // slices: they need the MergeMem wrapper too.
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "%s", u->Name());
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == nullptr) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        // Narrow user on our slice: use rep_proj directly.
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}
2359
// A load reference barrier node takes control and the loaded oop as inputs;
// it registers itself with the barrier set C2 state so it can be expanded
// later.
ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
: Node(ctrl, obj), _decorators(decorators) {
  ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
}
2364
// Accessor for the access decorators this barrier was created with.
DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
  return _decorators;
}
2368
// Concrete node size; needed because this node adds a field (_decorators)
// beyond the base Node layout.
uint ShenandoahLoadReferenceBarrierNode::size_of() const {
  return sizeof(*this);
}
2372
2373 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2374 return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2375 }
2376
2377 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2378 uint hash = Node::hash();
2379 hash += mask_decorators(_decorators);
2380 return hash;
2381 }
2382
2383 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
2384 return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2385 mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2386 }
2387
2388 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2389 if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2390 return Type::TOP;
2391 }
2392 const Type* t = in(ValueIn)->bottom_type();
2393 if (t == TypePtr::NULL_PTR) {
2394 return t;
2395 }
2396
2397 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2398 return t;
2399 }
2400
2401 return t->meet(TypePtr::NULL_PTR);
2402 }
2403
2404 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2405 // Either input is TOP ==> the result is TOP
2406 const Type *t2 = phase->type(in(ValueIn));
2407 if( t2 == Type::TOP ) return Type::TOP;
2408
2409 if (t2 == TypePtr::NULL_PTR) {
2410 return t2;
2411 }
2412
2413 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2414 return t2;
2415 }
2416
2417 return t2->meet(TypePtr::NULL_PTR);
2418 }
2419
2420 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2421 Node* value = in(ValueIn);
2422 if (!needs_barrier(phase, value)) {
2423 return value;
2424 }
2425 return this;
2426 }
2427
2428 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2429 Unique_Node_List visited;
2430 return needs_barrier_impl(phase, n, visited);
2431 }
2432
// Conservative analysis: return true if value n may need a load reference
// barrier. Returns false for values that never do (fresh allocations, call
// results, nulls, constants, existing barriers, incoming arguments) and
// recurses through value-transparent nodes (phis, casts, encode/decode,
// cmoves); visited cuts cycles through phis.
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
  if (n == nullptr) return false;
  if (visited.member(n)) {
    return false; // Been there.
  }
  visited.push(n);

  if (n->is_Allocate()) {
    // tty->print_cr("optimize barrier on alloc");
    return false;
  }
  if (n->is_Call()) {
    // tty->print_cr("optimize barrier on call");
    return false;
  }

  const Type* type = phase->type(n);
  if (type == Type::TOP) {
    return false;
  }
  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
    // tty->print_cr("optimize barrier on null");
    return false;
  }
  if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
    // tty->print_cr("optimize barrier on constant");
    return false;
  }

  switch (n->Opcode()) {
    case Op_AddP:
      return true; // TODO: Can refine?
    case Op_LoadP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
      return true;
    case Op_Phi: {
      // A phi needs a barrier if any of its inputs does.
      for (uint i = 1; i < n->req(); i++) {
        if (needs_barrier_impl(phase, n->in(i), visited)) return true;
      }
      return false;
    }
    case Op_CheckCastPP:
    case Op_CastPP:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_Proj:
      return needs_barrier_impl(phase, n->in(0), visited);
    case Op_ShenandoahLoadReferenceBarrier:
      // tty->print_cr("optimize barrier on barrier");
      return false;
    case Op_Parm:
      // tty->print_cr("optimize barrier on input arg");
      return false;
    case Op_DecodeN:
    case Op_EncodeP:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_LoadN:
      return true;
    case Op_CMoveN:
    case Op_CMoveP:
      // A cmove needs a barrier if either selectable value does.
      return needs_barrier_impl(phase, n->in(2), visited) ||
             needs_barrier_impl(phase, n->in(3), visited);
    case Op_CreateEx:
      return false;
    default:
      break;
  }
#ifdef ASSERT
  // Unknown producer of an oop: dump it and fail in debug builds so the
  // case gets added above; release builds conservatively keep the barrier.
  tty->print("need barrier on?: ");
  tty->print_cr("ins:");
  n->dump(2);
  tty->print_cr("outs:");
  n->dump(-2);
  ShouldNotReachHere();
#endif
  return true;
}