1 /*
2 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
3 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "classfile/javaClasses.hpp"
29 #include "gc/shenandoah/c2/shenandoahSupport.hpp"
30 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
31 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
32 #include "gc/shenandoah/shenandoahForwarding.hpp"
33 #include "gc/shenandoah/shenandoahHeap.hpp"
34 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
35 #include "gc/shenandoah/shenandoahRuntime.hpp"
36 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
37 #include "opto/arraycopynode.hpp"
38 #include "opto/block.hpp"
39 #include "opto/callnode.hpp"
40 #include "opto/castnode.hpp"
41 #include "opto/movenode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/rootnode.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46
47 bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
48 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
49 if ((state->iu_barriers_count() +
50 state->load_reference_barriers_count()) > 0) {
51 assert(C->post_loop_opts_phase(), "no loop opts allowed");
52 C->reset_post_loop_opts_phase(); // ... but we know what we are doing
53 C->clear_major_progress();
54 PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
55 if (C->failing()) return false;
56
57 C->set_major_progress();
58 if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
59 return false;
60 }
61 C->clear_major_progress();
62 C->process_for_post_loop_opts_igvn(igvn);
63 if (C->failing()) return false;
64
65 C->set_post_loop_opts_phase(); // now for real!
66 }
67 return true;
68 }
69
70 bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
71 if (!UseShenandoahGC) {
72 return false;
73 }
74 assert(iff->is_If(), "bad input");
75 if (iff->Opcode() != Op_If) {
76 return false;
77 }
78 Node* bol = iff->in(1);
79 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
80 return false;
81 }
82 Node* cmp = bol->in(1);
83 if (cmp->Opcode() != Op_CmpI) {
84 return false;
85 }
86 Node* in1 = cmp->in(1);
87 Node* in2 = cmp->in(2);
88 if (in2->find_int_con(-1) != 0) {
89 return false;
90 }
91 if (in1->Opcode() != Op_AndI) {
92 return false;
93 }
94 in2 = in1->in(2);
95 if (in2->find_int_con(-1) != mask) {
96 return false;
97 }
98 in1 = in1->in(1);
99
100 return is_gc_state_load(in1);
101 }
102
// A heap-stability test is a gc-state test against the HAS_FORWARDED flag:
// when no forwarded objects can exist, barriers may take the fast path.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}
106
107 bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
108 if (!UseShenandoahGC) {
109 return false;
110 }
111 if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
112 return false;
113 }
114 Node* addp = n->in(MemNode::Address);
115 if (!addp->is_AddP()) {
116 return false;
117 }
118 Node* base = addp->in(AddPNode::Address);
119 Node* off = addp->in(AddPNode::Offset);
120 if (base->Opcode() != Op_ThreadLocal) {
121 return false;
122 }
123 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
124 return false;
125 }
126 return true;
127 }
128
// Walk control flow backward from 'start' toward 'stop' (which must dominate
// 'start') and report whether any safepoint other than a leaf call lies on
// some path in between.
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      // Reached the dominating node; do not walk past it.
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      // Merge point: follow every incoming control path.
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      // Straight-line control: follow the single control input.
      wq.push(m->in(0));
    }
  }
  return false;
}
152
153 #ifdef ASSERT
// Chase the value graph feeding 'in' (through casts, AddPs, phis, CMoves,
// EncodeP/DecodeN) and verify that every oop producer reaching it is either
// trivially safe (null, non-oop, constant, parameter, exception oop, raw OSR
// load, fresh allocation, Java call result) or covered by a Shenandoah
// barrier appropriate for access kind 't'. Pending phi/CMove alternatives are
// tracked on 'phis'; 'visited' breaks cycles. Barriers found on the way are
// accumulated in 'barriers_used' so the caller can detect useless barriers.
// Returns false on the first value found without the required coverage.
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        // Transparent: look through the cast.
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          // An oop store additionally requires an IU barrier on the path
          // already walked (recorded on the phi stack below); without one
          // the value is not sufficiently protected.
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahIUBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahIUBarrier) {
        if (t != ShenandoahOopStore) {
          // Only oop stores care about IU barriers; otherwise look through.
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        // Push with index == req() so no further inputs are visited from it;
        // its presence on the stack satisfies the ShenandoahOopStore check
        // when a load-reference barrier is found deeper in the chain.
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          // Remember the phi (next input to visit is 2) and descend into
          // its first data input.
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          // Visit IfFalse now; IfTrue is left pending on the stack.
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        // Unknown producer: treat as unprotected.
        return false;
      }
    }
    // Current value accepted: resume with the next pending phi/CMove input.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        // All inputs of this pending node done; drop it.
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}
264
265 void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
266 if (n1 != nullptr) {
267 n1->dump(+10);
268 }
269 if (n2 != nullptr) {
270 n2->dump(+10);
271 }
272 fatal("%s", msg);
273 }
274
// Debug-only, graph-wide barrier verification. Walks nodes on a worklist and
// checks that every consumer of an oop that needs barrier coverage (loads,
// stores, compares, atomic load-stores, runtime/intrinsic leaf calls, and a
// table of known non-call nodes) is covered per verify_helper(). Any node
// type that consumes an oop but is not covered by the tables below triggers
// fatal(), forcing the tables to be kept in sync with new node types.
//
// NOTE(review): within this chunk only 'root' is ever pushed onto 'wq' — no
// node inputs are added to the worklist here. Confirm against the rest of
// the file/upstream whether the traversal push was elided in this version.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  // Left disabled: the useless-barrier cross-check below is debug scaffolding.
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false; // shadows the outer flag for per-case tracing
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        // Metadata loads never need oop barriers.
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          // Mark-word loads are exempt.
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          // Reference.referent loads are handled specially by the GC.
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          // Raw-memory store: check for the SATB pre-barrier pattern, which
          // stores into the thread-local SATB queue buffer and is exempt.
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        // Stored oop value needs OopStore coverage only when IU barriers are enabled.
        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          // Comparisons against null/constants don't need barriers.
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          // A freshly allocated object cannot be forwarded.
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahIUBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Table of known runtime leaf calls: for each call name, the argument
      // positions that carry oops and the coverage each one requires.
      // Unused slots are marked with pos == -1.
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = nullptr;
        // The destination is the second pointer argument of the stub.
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        // Array fill stubs (names ending in "_fill") store through their
        // first argument.
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        // Look the call up in the table above.
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          // Known call: check the listed oop arguments...
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          // ... and make sure the table lists every oop argument the call
          // actually takes.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown call: it must not take any oop arguments at all.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahIUBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Table of known non-call oop consumers and the coverage their inputs
      // require; unused slots are marked with pos == -1.
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      // For calls, only check declared parameters; otherwise all inputs.
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        // Known node: check the listed inputs...
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        // ... and make sure the table lists every oop input.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Unknown node: it must not consume any oops at all.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        // Debug-only: count barriers reachable from debug info as "used".
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
640 #endif
641
642 bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
643 Node* control) {
644 return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
645 phase->ctrl_or_self(maybe_load) == control;
646 }
647
648 void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
649 if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
650 return;
651 }
652 Node* mem = maybe_store->in(MemNode::Memory);
653 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
654 Node* u = mem->fast_out(i);
655 if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
656 wq.push(u);
657 }
658 }
659 }
660
661 void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
662 for (uint i = 0; i < n->req(); i++) {
663 Node* in = n->in(i);
664 if (in != nullptr && (ShenandoahIUBarrier ? (phase->ctrl_or_self(in) == ctrl) : (phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl))) {
665 wq.push(in);
666 }
667 }
668 }
669
// Decide whether 'd' dominates 'n' when both share the same control 'c'.
bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      // Found a path d -> n, so d cannot dominate n.
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      // Do not follow the loop-entry path of a loop phi out of this control.
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      // Take anti-dependencies into account
      maybe_push_anti_dependent_loads(phase, m, c, wq);
      push_data_inputs_at_control(phase, m, c, wq);
    }
  }
  return true;
}
691
692 bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
693 if (d_c != n_c) {
694 return phase->is_dominator(d_c, n_c);
695 }
696 return is_dominator_same_ctrl(d_c, d, n, phase);
697 }
698
// Step from 'mem' to the previous memory state in the given alias slice.
Node* next_mem(Node* mem, int alias) {
  Node* res = nullptr;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    // Follow the first (entry) input of the phi.
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}
720
// Walk the idom chain up from 'c' to 'dom' and check that the path has no
// unsupported branching. Returns:
//   - nullptr       : straight-line control (ignoring uncommon-trap Ifs),
//   - an If proj    : exactly one non-trap If was crossed and
//                     allow_one_proj is true,
//   - NodeSentinel  : unsupported control flow (multiple Ifs, Jump/Catch
//                     projections, NeverBranch, or a Region with control
//                     escaping to non-trap paths).
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect the CFG nodes between the region and its idom ...
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      // ... and verify no control escapes that subgraph except through
      // uncommon-trap projections.
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
          // Uncommon-trap If: harmless, keep walking.
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == nullptr) {
            // First real If on the path: remember its projection.
            iffproj = c;
          } else {
            // More than one real If is unsupported.
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}
785
// Walk up the memory graph in the given alias slice from 'mem' until a memory
// state whose control strictly dominates 'ctrl' is found. Returns that state
// (unpacked from a MergeMem if needed) with its control in 'mem_ctrl', or
// nullptr if the walk revisits a node (no dominating state reachable).
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq;
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      // Cycle: give up.
      return nullptr;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}
804
// Starting at 'ctrl' and walking up the idom chain, find the bottom-memory
// state (adr_type == TypePtr::BOTTOM) live at that point: a memory phi at a
// region, a call's bottom memory projection, or the memory projection of a
// safepoint/membar/start.
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      // Look for a bottom-memory phi merging at this region.
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              // Pick whichever memory projection flows to 'ctrl'.
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        // Non-call control: a bottom-memory projection hanging off it.
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
            assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
            assert(mem == nullptr, "only one proj");
            mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}
860
861 void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
862 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
863 Node* u = n->fast_out(i);
864 if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
865 uses.push(u);
866 }
867 }
868 }
869
870 static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
871 OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
872 Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
873 phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
874 Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
875 phase->register_control(new_le, phase->get_loop(le), le->in(0));
876 phase->lazy_replace(outer, new_outer);
877 phase->lazy_replace(le, new_le);
878 inner->clear_strip_mined();
879 }
880
// Emit the gc-state test: load the per-thread gc_state byte and branch on
// (gc_state & flags) != 0. On return, 'ctrl' is the true projection (some
// flag set, i.e. the path that needs barrier work) and 'test_fail_ctrl' the
// false projection (no flag set — the fast, stable path).
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  // Load the gc-state byte from the current thread's local data area.
  Node* thread = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                 DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                 TypeInt::BYTE, MemNode::unordered);
  // (gc_state & flags) != 0 ?
  Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

  // The true (flag set) path is expected to be rarely taken.
  IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(gc_state_iff);
  test_fail_ctrl = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread, old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state, old_ctrl);
  phase->register_new_node(gc_state_and, old_ctrl);
  phase->register_new_node(gc_state_cmp, old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  // The offset is a constant: pin it at the root so it can be hoisted freely.
  phase->set_ctrl(gc_state_offset, phase->C->root());

  // The emitted shape must be recognizable later (e.g. for loop opts).
  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}
916
// Emit a null check for 'val' if its type says it may be null. On return,
// 'ctrl' is the not-null path; 'null_ctrl' is set to the null path, or left
// untouched (caller initializes it to nullptr) when the check is statically
// unnecessary.
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  // meet(NULL_PTR) == type  <=>  null is already included in the type,
  // i.e. the value may actually be null at runtime.
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test = new BoolNode(null_cmp, BoolTest::ne);

    // Null is expected to be rare.
    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl = new IfTrueNode(null_iff);
    null_ctrl = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff, loop, old_ctrl);
    phase->register_control(ctrl, loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp, old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}
939
// Emit the in-collection-set test: index the global in-cset byte map with
// the object's region index (address >> region_size_bytes_shift) and branch
// on the loaded byte being non-zero. On return, 'ctrl' is the in-cset path
// and 'not_cset_ctrl' the not-in-cset path.
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  // Region index of the object: raw address shifted by the region size.
  Node* raw_val = new CastP2XNode(old_ctrl, val);
  Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

  Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                  DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                  TypeInt::BYTE, MemNode::unordered);
  // Non-zero byte means the region is in the collection set.
  Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

  // In-cset is expected to be the unlikely case.
  IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl = new IfTrueNode(cset_iff);
  not_cset_ctrl = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff, loop, old_ctrl);
  phase->register_control(ctrl, loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  // The cset map base is a constant: pin it at the root.
  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val, old_ctrl);
  phase->register_new_node(cset_idx, old_ctrl);
  phase->register_new_node(cset_addr, old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr, old_ctrl);
  phase->register_new_node(cset_load, old_ctrl);
  phase->register_new_node(cset_cmp, old_ctrl);
  phase->register_new_node(cset_bool, old_ctrl);
}
981
982 void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
983 DecoratorSet decorators, PhaseIdealLoop* phase) {
984 IdealLoopTree*loop = phase->get_loop(ctrl);
985 const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
986
987 address calladdr = nullptr;
988 const char* name = nullptr;
989 bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
990 bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
991 bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
992 bool is_native = ShenandoahBarrierSet::is_native_access(decorators);
993 bool is_narrow = UseCompressedOops && !is_native;
994 if (is_strong) {
995 if (is_narrow) {
996 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
997 name = "load_reference_barrier_strong_narrow";
998 } else {
999 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
1000 name = "load_reference_barrier_strong";
1001 }
1002 } else if (is_weak) {
1003 if (is_narrow) {
1004 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
1005 name = "load_reference_barrier_weak_narrow";
1006 } else {
1007 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
1008 name = "load_reference_barrier_weak";
1009 }
1010 } else {
1011 assert(is_phantom, "only remaining strength");
1012 if (is_narrow) {
1013 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
1014 name = "load_reference_barrier_phantom_narrow";
1015 } else {
1016 calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
1017 name = "load_reference_barrier_phantom";
1018 }
1019 }
1020 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
1021
1022 call->init_req(TypeFunc::Control, ctrl);
1023 call->init_req(TypeFunc::I_O, phase->C->top());
1024 call->init_req(TypeFunc::Memory, phase->C->top());
1025 call->init_req(TypeFunc::FramePtr, phase->C->top());
1026 call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1027 call->init_req(TypeFunc::Parms, val);
1028 call->init_req(TypeFunc::Parms+1, load_addr);
1029 phase->register_control(call, loop, ctrl);
1030 ctrl = new ProjNode(call, TypeFunc::Control);
1031 phase->register_control(ctrl, loop, call);
1032 val = new ProjNode(call, TypeFunc::Parms);
1033 phase->register_new_node(val, call);
1034 val = new CheckCastPPNode(ctrl, val, obj_type);
1035 phase->register_new_node(val, ctrl);
1036 }
1037
1038 void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
1039 nodes_above_barrier.clear();
1040 if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
1041 nodes_above_barrier.push(init_raw_mem);
1042 }
1043 for (uint next = 0; next < nodes_above_barrier.size(); next++) {
1044 Node* n = nodes_above_barrier.at(next);
1045 // Take anti-dependencies into account
1046 maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
1047 push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
1048 }
1049 }
1050
// After a barrier has been expanded into the control-flow 'region', move
// everything that was control dependent on the barrier's old control below
// the region, while leaving the nodes computed by
// collect_nodes_above_barrier() (notably the input raw memory) above it.
// Moved data nodes are collected in 'uses' for the caller to re-pin.
// 'last' bounds the scan to nodes that existed before expansion started.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    // Skip: nodes created during expansion (idx >= last), the barrier
    // itself, pinned-on-test nodes, nodes that must stay above, regions/phis
    // merging at ctrl, and the CreateEx tied to a CatchProj.
    if (u->_idx < last &&
        u != barrier &&
        !u->depends_only_on_test() && // preserve dependency on test
        !nodes_above_barrier.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      if (old_c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        // Retarget u's control edge from the old control to the new region.
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        // Compensate the DU iterator for the removed out-edge.
        --i, imax -= nb;
      }
    }
  }
}
1091
// When a node 'n' was cloned (as 'n_clone') across a call's fallthrough and
// exception paths, build the phi(s) that merge the two versions at the
// region(s) between control 'c' and the call's control 'ctrl', and return
// the phi to use at 'c'. Recurses for region inputs reached by neither
// path directly.
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = nullptr;
  // Walk the dominator chain from c up to ctrl; the last Region seen is the
  // merge point highest in the chain, where both versions must be phi-ed.
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != nullptr, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      // Path from the call's normal return: use the original node.
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      // Path from the exception handler: use the clone.
      phi->init_req(j, n_clone);
    } else {
      // Mixed path: recurse to build the phi at the intermediate merge.
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}
1115
1116 void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1117 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1118
1119 Unique_Node_List uses;
1120 for (int i = 0; i < state->iu_barriers_count(); i++) {
1121 Node* barrier = state->iu_barrier(i);
1122 Node* ctrl = phase->get_ctrl(barrier);
1123 IdealLoopTree* loop = phase->get_loop(ctrl);
1124 Node* head = loop->head();
1125 if (head->is_OuterStripMinedLoop()) {
1126 // Expanding a barrier here will break loop strip mining
1127 // verification. Transform the loop so the loop nest doesn't
1128 // appear as strip mined.
1129 OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1130 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1131 }
1132 }
1133
1134 Node_Stack stack(0);
1135 Node_List clones;
1136 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1137 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1138
1139 Node* ctrl = phase->get_ctrl(lrb);
1140 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1141
1142 CallStaticJavaNode* unc = nullptr;
1143 Node* unc_ctrl = nullptr;
1144 Node* uncasted_val = val;
1145
1146 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1147 Node* u = lrb->fast_out(i);
1148 if (u->Opcode() == Op_CastPP &&
1149 u->in(0) != nullptr &&
1150 phase->is_dominator(u->in(0), ctrl)) {
1151 const Type* u_t = phase->igvn().type(u);
1152
1153 if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1154 u->in(0)->Opcode() == Op_IfTrue &&
1155 u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1156 u->in(0)->in(0)->is_If() &&
1157 u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1158 u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1159 u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1160 u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1161 u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1162 IdealLoopTree* loop = phase->get_loop(ctrl);
1163 IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1164
1165 if (!unc_loop->is_member(loop)) {
1166 continue;
1167 }
1168
1169 Node* branch = no_branches(ctrl, u->in(0), false, phase);
1170 assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
1171 if (branch == NodeSentinel) {
1172 continue;
1173 }
1174
1175 Node* iff = u->in(0)->in(0);
1176 Node* bol = iff->in(1)->clone();
1177 Node* cmp = bol->in(1)->clone();
1178 cmp->set_req(1, lrb);
1179 bol->set_req(1, cmp);
1180 phase->igvn().replace_input_of(iff, 1, bol);
1181 phase->set_ctrl(lrb, iff->in(0));
1182 phase->register_new_node(cmp, iff->in(0));
1183 phase->register_new_node(bol, iff->in(0));
1184 break;
1185 }
1186 }
1187 }
1188 // Load barrier on the control output of a call
1189 if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1190 CallJavaNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1191 if (call->entry_point() == OptoRuntime::rethrow_stub()) {
1192 // The rethrow call may have too many projections to be
1193 // properly handled here. Given there's no reason for a
1194 // barrier to depend on the call, move it above the call
1195 stack.push(lrb, 0);
1196 do {
1197 Node* n = stack.node();
1198 uint idx = stack.index();
1199 if (idx < n->req()) {
1200 Node* in = n->in(idx);
1201 stack.set_index(idx+1);
1202 if (in != nullptr) {
1203 if (phase->has_ctrl(in)) {
1204 if (phase->is_dominator(call, phase->get_ctrl(in))) {
1205 #ifdef ASSERT
1206 for (uint i = 0; i < stack.size(); i++) {
1207 assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
1208 }
1209 #endif
1210 stack.push(in, 0);
1211 }
1212 } else {
1213 assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
1214 }
1215 }
1216 } else {
1217 phase->set_ctrl(n, call->in(0));
1218 stack.pop();
1219 }
1220 } while(stack.size() > 0);
1221 continue;
1222 }
1223 CallProjections projs;
1224 call->extract_projections(&projs, false, false);
1225
1226 // If this is a runtime call, it doesn't have an exception handling path
1227 if (projs.fallthrough_catchproj == nullptr) {
1228 assert(call->method() == nullptr, "should be runtime call");
1229 assert(projs.catchall_catchproj == nullptr, "runtime call should not have catch all projection");
1230 continue;
1231 }
1232
1233 // Otherwise, clone the barrier so there's one for the fallthrough and one for the exception handling path
1234 #ifdef ASSERT
1235 VectorSet cloned;
1236 #endif
1237 Node* lrb_clone = lrb->clone();
1238 phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1239 phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1240
1241 stack.push(lrb, 0);
1242 clones.push(lrb_clone);
1243
1244 do {
1245 assert(stack.size() == clones.size(), "");
1246 Node* n = stack.node();
1247 #ifdef ASSERT
1248 if (n->is_Load()) {
1249 Node* mem = n->in(MemNode::Memory);
1250 for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1251 Node* u = mem->fast_out(j);
1252 assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1253 }
1254 }
1255 #endif
1256 uint idx = stack.index();
1257 Node* n_clone = clones.at(clones.size()-1);
1258 if (idx < n->outcnt()) {
1259 Node* u = n->raw_out(idx);
1260 Node* c = phase->ctrl_or_self(u);
1261 if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1262 stack.set_index(idx+1);
1263 assert(!u->is_CFG(), "");
1264 stack.push(u, 0);
1265 assert(!cloned.test_set(u->_idx), "only one clone");
1266 Node* u_clone = u->clone();
1267 int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
1268 assert(nb > 0, "should have replaced some uses");
1269 phase->register_new_node(u_clone, projs.catchall_catchproj);
1270 clones.push(u_clone);
1271 phase->set_ctrl(u, projs.fallthrough_catchproj);
1272 } else {
1273 bool replaced = false;
1274 if (u->is_Phi()) {
1275 for (uint k = 1; k < u->req(); k++) {
1276 if (u->in(k) == n) {
1277 if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1278 phase->igvn().replace_input_of(u, k, n_clone);
1279 replaced = true;
1280 } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1281 phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1282 replaced = true;
1283 }
1284 }
1285 }
1286 } else {
1287 if (phase->is_dominator(projs.catchall_catchproj, c)) {
1288 phase->igvn().rehash_node_delayed(u);
1289 int nb = u->replace_edge(n, n_clone, &phase->igvn());
1290 assert(nb > 0, "should have replaced some uses");
1291 replaced = true;
1292 } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1293 if (u->is_If()) {
1294 // Can't break If/Bool/Cmp chain
1295 assert(n->is_Bool(), "unexpected If shape");
1296 assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
1297 assert(n_clone->is_Bool(), "unexpected clone");
1298 assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
1299 Node* bol_clone = n->clone();
1300 Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
1301 bol_clone->set_req(1, cmp_clone);
1302
1303 Node* nn = stack.node_at(stack.size()-3);
1304 Node* nn_clone = clones.at(clones.size()-3);
1305 assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
1306
1307 int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
1308 &phase->igvn());
1309 assert(nb > 0, "should have replaced some uses");
1310
1311 phase->register_new_node(bol_clone, u->in(0));
1312 phase->register_new_node(cmp_clone, u->in(0));
1313
1314 phase->igvn().replace_input_of(u, 1, bol_clone);
1315
1316 } else {
1317 phase->igvn().rehash_node_delayed(u);
1318 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
1319 assert(nb > 0, "should have replaced some uses");
1320 }
1321 replaced = true;
1322 }
1323 }
1324 if (!replaced) {
1325 stack.set_index(idx+1);
1326 }
1327 }
1328 } else {
1329 stack.pop();
1330 clones.pop();
1331 }
1332 } while (stack.size() > 0);
1333 assert(stack.size() == 0 && clones.size() == 0, "");
1334 }
1335 }
1336
1337 for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1338 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1339 Node* ctrl = phase->get_ctrl(lrb);
1340 IdealLoopTree* loop = phase->get_loop(ctrl);
1341 Node* head = loop->head();
1342 if (head->is_OuterStripMinedLoop()) {
1343 // Expanding a barrier here will break loop strip mining
1344 // verification. Transform the loop so the loop nest doesn't
1345 // appear as strip mined.
1346 OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1347 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1348 }
1349 if (head->is_BaseCountedLoop() && ctrl->is_IfProj() && ctrl->in(0)->is_BaseCountedLoopEnd() &&
1350 head->as_BaseCountedLoop()->loopexit() == ctrl->in(0)) {
1351 Node* entry = head->in(LoopNode::EntryControl);
1352 Node* backedge = head->in(LoopNode::LoopBackControl);
1353 Node* new_head = new LoopNode(entry, backedge);
1354 phase->register_control(new_head, phase->get_loop(entry), entry);
1355 phase->lazy_replace(head, new_head);
1356 }
1357 }
1358
1359 // Expand load-reference-barriers
1360 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1361 Unique_Node_List nodes_above_barriers;
1362 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1363 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1364 uint last = phase->C->unique();
1365 Node* ctrl = phase->get_ctrl(lrb);
1366 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1367
1368 Node* orig_ctrl = ctrl;
1369
1370 Node* raw_mem = fixer.find_mem(ctrl, lrb);
1371 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1372
1373 IdealLoopTree *loop = phase->get_loop(ctrl);
1374
1375 Node* heap_stable_ctrl = nullptr;
1376 Node* null_ctrl = nullptr;
1377
1378 assert(val->bottom_type()->make_oopptr(), "need oop");
1379 assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1380
1381 enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1382 Node* region = new RegionNode(PATH_LIMIT);
1383 Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1384
1385 // Stable path.
1386 int flags = ShenandoahHeap::HAS_FORWARDED;
1387 if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1388 flags |= ShenandoahHeap::WEAK_ROOTS;
1389 }
1390 test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1391 IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1392
1393 // Heap stable case
1394 region->init_req(_heap_stable, heap_stable_ctrl);
1395 val_phi->init_req(_heap_stable, val);
1396
1397 // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1398 // even for non-cset objects to prevent resurrection of such objects.
1399 // Wires !in_cset(obj) to slot 2 of region and phis
1400 Node* not_cset_ctrl = nullptr;
1401 if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1402 test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1403 }
1404 if (not_cset_ctrl != nullptr) {
1405 region->init_req(_not_cset, not_cset_ctrl);
1406 val_phi->init_req(_not_cset, val);
1407 } else {
1408 region->del_req(_not_cset);
1409 val_phi->del_req(_not_cset);
1410 }
1411
1412 // Resolve object when orig-value is in cset.
1413 // Make the unconditional resolve for fwdptr.
1414
1415 // Call lrb-stub and wire up that path in slots 4
1416 Node* result_mem = nullptr;
1417
1418 Node* addr;
1419 {
1420 VectorSet visited;
1421 addr = get_load_addr(phase, visited, lrb);
1422 }
1423 if (addr->Opcode() == Op_AddP) {
1424 Node* orig_base = addr->in(AddPNode::Base);
1425 Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1426 phase->register_new_node(base, ctrl);
1427 if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) {
1428 // Field access
1429 addr = addr->clone();
1430 addr->set_req(AddPNode::Base, base);
1431 addr->set_req(AddPNode::Address, base);
1432 phase->register_new_node(addr, ctrl);
1433 } else {
1434 Node* addr2 = addr->in(AddPNode::Address);
1435 if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1436 addr2->in(AddPNode::Base) == orig_base) {
1437 addr2 = addr2->clone();
1438 addr2->set_req(AddPNode::Base, base);
1439 addr2->set_req(AddPNode::Address, base);
1440 phase->register_new_node(addr2, ctrl);
1441 addr = addr->clone();
1442 addr->set_req(AddPNode::Base, base);
1443 addr->set_req(AddPNode::Address, addr2);
1444 phase->register_new_node(addr, ctrl);
1445 }
1446 }
1447 }
1448 call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1449 region->init_req(_evac_path, ctrl);
1450 val_phi->init_req(_evac_path, val);
1451
1452 phase->register_control(region, loop, heap_stable_iff);
1453 Node* out_val = val_phi;
1454 phase->register_new_node(val_phi, region);
1455
1456 fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
1457
1458 ctrl = orig_ctrl;
1459
1460 phase->igvn().replace_node(lrb, out_val);
1461
1462 follow_barrier_uses(out_val, ctrl, uses, phase);
1463
1464 for(uint next = 0; next < uses.size(); next++ ) {
1465 Node *n = uses.at(next);
1466 assert(phase->get_ctrl(n) == ctrl, "bad control");
1467 assert(n != raw_mem, "should leave input raw mem above the barrier");
1468 phase->set_ctrl(n, region);
1469 follow_barrier_uses(n, ctrl, uses, phase);
1470 }
1471 fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1472 }
1473 // Done expanding load-reference-barriers.
1474 assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1475
1476 for (int i = state->iu_barriers_count() - 1; i >= 0; i--) {
1477 Node* barrier = state->iu_barrier(i);
1478 Node* pre_val = barrier->in(1);
1479
1480 if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1481 ShouldNotReachHere();
1482 continue;
1483 }
1484
1485 Node* ctrl = phase->get_ctrl(barrier);
1486
1487 if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1488 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1489 ctrl = ctrl->in(0)->in(0);
1490 phase->set_ctrl(barrier, ctrl);
1491 } else if (ctrl->is_CallRuntime()) {
1492 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1493 ctrl = ctrl->in(0);
1494 phase->set_ctrl(barrier, ctrl);
1495 }
1496
1497 Node* init_ctrl = ctrl;
1498 IdealLoopTree* loop = phase->get_loop(ctrl);
1499 Node* raw_mem = fixer.find_mem(ctrl, barrier);
1500 Node* init_raw_mem = raw_mem;
1501 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1502 Node* heap_stable_ctrl = nullptr;
1503 Node* null_ctrl = nullptr;
1504 uint last = phase->C->unique();
1505
1506 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1507 Node* region = new RegionNode(PATH_LIMIT);
1508 Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1509
1510 enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1511 Node* region2 = new RegionNode(PATH_LIMIT2);
1512 Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1513
1514 // Stable path.
1515 test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
1516 region->init_req(_heap_stable, heap_stable_ctrl);
1517 phi->init_req(_heap_stable, raw_mem);
1518
1519 // Null path
1520 Node* reg2_ctrl = nullptr;
1521 test_null(ctrl, pre_val, null_ctrl, phase);
1522 if (null_ctrl != nullptr) {
1523 reg2_ctrl = null_ctrl->in(0);
1524 region2->init_req(_null_path, null_ctrl);
1525 phi2->init_req(_null_path, raw_mem);
1526 } else {
1527 region2->del_req(_null_path);
1528 phi2->del_req(_null_path);
1529 }
1530
1531 const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1532 const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1533 Node* thread = new ThreadLocalNode();
1534 phase->register_new_node(thread, ctrl);
1535 Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1536 phase->register_new_node(buffer_adr, ctrl);
1537 Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1538 phase->register_new_node(index_adr, ctrl);
1539
1540 BasicType index_bt = TypeX_X->basic_type();
1541 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading Shenandoah SATBMarkQueue::_index with wrong size.");
1542 const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1543 Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1544 phase->register_new_node(index, ctrl);
1545 Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1546 phase->register_new_node(index_cmp, ctrl);
1547 Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1548 phase->register_new_node(index_test, ctrl);
1549 IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1550 if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
1551 phase->register_control(queue_full_iff, loop, ctrl);
1552 Node* not_full = new IfTrueNode(queue_full_iff);
1553 phase->register_control(not_full, loop, queue_full_iff);
1554 Node* full = new IfFalseNode(queue_full_iff);
1555 phase->register_control(full, loop, queue_full_iff);
1556
1557 ctrl = not_full;
1558
1559 Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1560 phase->register_new_node(next_index, ctrl);
1561
1562 Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1563 phase->register_new_node(buffer, ctrl);
1564 Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1565 phase->register_new_node(log_addr, ctrl);
1566 Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1567 phase->register_new_node(log_store, ctrl);
1568 // update the index
1569 Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1570 phase->register_new_node(index_update, ctrl);
1571
1572 // Fast-path case
1573 region2->init_req(_fast_path, ctrl);
1574 phi2->init_req(_fast_path, index_update);
1575
1576 ctrl = full;
1577
1578 Node* base = find_bottom_mem(ctrl, phase);
1579
1580 MergeMemNode* mm = MergeMemNode::make(base);
1581 mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1582 phase->register_new_node(mm, ctrl);
1583
1584 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1585 call->init_req(TypeFunc::Control, ctrl);
1586 call->init_req(TypeFunc::I_O, phase->C->top());
1587 call->init_req(TypeFunc::Memory, mm);
1588 call->init_req(TypeFunc::FramePtr, phase->C->top());
1589 call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1590 call->init_req(TypeFunc::Parms, pre_val);
1591 call->init_req(TypeFunc::Parms+1, thread);
1592 phase->register_control(call, loop, ctrl);
1593
1594 Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1595 phase->register_control(ctrl_proj, loop, call);
1596 Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1597 phase->register_new_node(mem_proj, call);
1598
1599 // Slow-path case
1600 region2->init_req(_slow_path, ctrl_proj);
1601 phi2->init_req(_slow_path, mem_proj);
1602
1603 phase->register_control(region2, loop, reg2_ctrl);
1604 phase->register_new_node(phi2, region2);
1605
1606 region->init_req(_heap_unstable, region2);
1607 phi->init_req(_heap_unstable, phi2);
1608
1609 phase->register_control(region, loop, heap_stable_ctrl->in(0));
1610 phase->register_new_node(phi, region);
1611
1612 fix_ctrl(barrier, region, fixer, uses, nodes_above_barriers, last, phase);
1613 for(uint next = 0; next < uses.size(); next++ ) {
1614 Node *n = uses.at(next);
1615 assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1616 assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1617 phase->set_ctrl(n, region);
1618 follow_barrier_uses(n, init_ctrl, uses, phase);
1619 }
1620 fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1621
1622 phase->igvn().replace_node(barrier, pre_val);
1623 }
1624 assert(state->iu_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1625
1626 }
1627
// Walk up the data graph from 'in' to recover the address the value was
// loaded from, if any. Returns the address node when it can be determined,
// a zero (null) constant when there is no usable address (constants, call
// results, atomic operation results, ambiguous merges), and nullptr when
// 'in' was already visited (cycle through a phi).
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
  if (visited.test_set(in->_idx)) {
    // Already seen (cycle through a phi): report "no address" to the caller.
    return nullptr;
  }
  switch (in->Opcode()) {
    case Op_Proj:
      return get_load_addr(phase, visited, in->in(0));
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_DecodeN:
    case Op_EncodeP:
      // Casts and narrow-oop conversions pass the value through: look through them.
      return get_load_addr(phase, visited, in->in(1));
    case Op_LoadN:
    case Op_LoadP:
      // Found the load: its address input is the answer.
      return in->in(MemNode::Address);
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      // Those instructions would just have stored a different
      // value into the field. No use to attempt to fix it at this point.
      return phase->igvn().zerocon(T_OBJECT);
    case Op_CMoveP:
    case Op_CMoveN: {
      Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
      Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
      // Handle unambiguous cases: single address reported on both branches.
      if (t != nullptr && f == nullptr) return t;
      if (t == nullptr && f != nullptr) return f;
      if (t != nullptr && t == f) return t;
      // Ambiguity.
      return phase->igvn().zerocon(T_OBJECT);
    }
    case Op_Phi: {
      // The address is usable only if all phi inputs agree on it.
      Node* addr = nullptr;
      for (uint i = 1; i < in->req(); i++) {
        Node* addr1 = get_load_addr(phase, visited, in->in(i));
        if (addr == nullptr) {
          addr = addr1;
        }
        if (addr != addr1) {
          return phase->igvn().zerocon(T_OBJECT);
        }
      }
      return addr;
    }
    case Op_ShenandoahLoadReferenceBarrier:
      // The barrier passes the loaded value through: look at its input.
      return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
    case Op_ShenandoahIUBarrier:
      return get_load_addr(phase, visited, in->in(1));
    case Op_CallDynamicJava:
    case Op_CallLeaf:
    case Op_CallStaticJava:
    case Op_ConN:
    case Op_ConP:
    case Op_Parm:
    case Op_CreateEx:
      // Values not produced by a load: there is no address to report.
      return phase->igvn().zerocon(T_OBJECT);
    default:
#ifdef ASSERT
      fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
#endif
      return phase->igvn().zerocon(T_OBJECT);
  }

}
1696
// Clone the gc state test guarded by 'iff' (load of the gc state, mask,
// compare) above the loop it sits in: the test is loop invariant, and
// hoisting it enables loop unswitching on the gc state condition.
void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(iff);
  Node* loop_head = loop->_head;
  Node* entry_c = loop_head->in(LoopNode::EntryControl);

  // Expected shape: If -> Bool -> Cmp -> And -> Load(gc state).
  Node* bol = iff->in(1);
  Node* cmp = bol->in(1);
  Node* andi = cmp->in(1);
  Node* load = andi->in(1);

  assert(is_gc_state_load(load), "broken");
  // Only clone if the load's control doesn't already dominate the loop entry.
  if (!phase->is_dominator(load->in(0), entry_c)) {
    Node* mem_ctrl = nullptr;
    // Raw memory state valid at the loop head, to feed the cloned load.
    Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
    load = load->clone();
    load->set_req(MemNode::Memory, mem);
    load->set_req(0, entry_c);
    phase->register_new_node(load, entry_c);
    andi = andi->clone();
    andi->set_req(1, load);
    phase->register_new_node(andi, entry_c);
    cmp = cmp->clone();
    cmp->set_req(1, andi);
    phase->register_new_node(cmp, entry_c);
    bol = bol->clone();
    bol->set_req(1, cmp);
    phase->register_new_node(bol, entry_c);

    // Swing the If over to the hoisted condition; the original chain is
    // left for igvn to clean up if it becomes unused.
    phase->igvn().replace_input_of(iff, 1, bol);
  }
}
1728
1729 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1730 if (!n->is_If() || n->is_CountedLoopEnd()) {
1731 return false;
1732 }
1733 Node* region = n->in(0);
1734
1735 if (!region->is_Region()) {
1736 return false;
1737 }
1738 Node* dom = phase->idom(region);
1739 if (!dom->is_If()) {
1740 return false;
1741 }
1742
1743 if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1744 return false;
1745 }
1746
1747 IfNode* dom_if = dom->as_If();
1748 Node* proj_true = dom_if->proj_out(1);
1749 Node* proj_false = dom_if->proj_out(0);
1750
1751 for (uint i = 1; i < region->req(); i++) {
1752 if (phase->is_dominator(proj_true, region->in(i))) {
1753 continue;
1754 }
1755 if (phase->is_dominator(proj_false, region->in(i))) {
1756 continue;
1757 }
1758 return false;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool ShenandoahBarrierC2Support::merge_point_safe(Node* region) {
1765 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1766 Node* n = region->fast_out(i);
1767 if (n->is_LoadStore()) {
1768 // Splitting a LoadStore node through phi, causes it to lose its SCMemProj: the split if code doesn't have support
1769 // for a LoadStore at the region the if is split through because that's not expected to happen (LoadStore nodes
1770 // should be between barrier nodes). It does however happen with Shenandoah though because barriers can get
1771 // expanded around a LoadStore node.
1772 return false;
1773 }
1774 }
1775 return true;
1776 }
1777
1778
// Merge a heap stable test 'n' with the identical test dominating its region
// (see identical_backtoback_ifs()): feed 'n' a phi of constant conditions —
// true on paths from the dominating test's true branch, false otherwise —
// then split 'n' through the region so each copy tests a constant and folds.
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl) && merge_point_safe(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Make both tests key off the same gc state load so split-if can
        // fold the condition (chain: If -> Bool -> Cmp -> And -> Load).
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      // Phi of constants: 1 where the dominating test already proved the
      // condition true, 0 where it proved it false.
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node* proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);

      for (uint i = 1; i < n_ctrl->req(); i++) {
        if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
          bolphi->init_req(i, con_true);
        } else {
          assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
          bolphi->init_req(i, con_false);
        }
      }
      phase->register_new_node(bolphi, n_ctrl);
      phase->igvn().replace_input_of(n, 1, bolphi);
      // Split the if through the region; the per-path constant conditions
      // let the resulting ifs fold away.
      phase->do_split_if(n);
    }
  }
}
1814
// Find a heap stable test in 'loop' to use as a loop-unswitching candidate.
// Walks the idom chain from the backedge up to the loop head; the returned
// If (if any) is the last qualifying one found on that walk. Loops that
// contain a (non leaf-call) safepoint yield no candidate.
IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
  // Find first invariant test that doesn't exit the loop
  LoopNode *head = loop->_head->as_Loop();
  IfNode* unswitch_iff = nullptr;
  Node* n = head->in(LoopNode::LoopBackControl);
  // Lazily computed: -1 = unknown, 0 = no safepoints, 1 = has a safepoint.
  int loop_has_sfpts = -1;
  while (n != head) {
    Node* n_dom = phase->idom(n);
    if (n->is_Region()) {
      if (n_dom->is_If()) {
        IfNode* iff = n_dom->as_If();
        if (iff->in(1)->is_Bool()) {
          BoolNode* bol = iff->in(1)->as_Bool();
          if (bol->in(1)->is_Cmp()) {
            // If condition is invariant and not a loop exit,
            // then found reason to unswitch.
            if (is_heap_stable_test(iff) &&
                (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
              assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
              if (loop_has_sfpts == -1) {
                // One-time scan of the loop body for a real safepoint.
                for(uint i = 0; i < loop->_body.size(); i++) {
                  Node *m = loop->_body[i];
                  if (m->is_SafePoint() && !m->is_CallLeaf()) {
                    loop_has_sfpts = 1;
                    break;
                  }
                }
                if (loop_has_sfpts == -1) {
                  loop_has_sfpts = 0;
                }
              }
              // NOTE(review): candidates are only accepted in safepoint-free
              // loops — presumably because the gc state can change at a
              // safepoint, invalidating a hoisted test. Confirm.
              if (!loop_has_sfpts) {
                unswitch_iff = iff;
              }
            }
          }
        }
      }
    }
    n = n_dom;
  }
  return unswitch_iff;
}
1858
1859
// Post-barrier-expansion cleanups: collect all heap stable tests in the
// graph, merge back-to-back identical tests, and — when no other major
// progress is pending — try to unswitch innermost loops on the heap stable
// condition.
void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
  Node_List heap_stable_tests;
  // DFS over the whole graph from Start, recording heap stable test Ifs.
  stack.push(phase->C->start(), 0);
  do {
    Node* n = stack.node();
    uint i = stack.index();

    if (i < n->outcnt()) {
      Node* u = n->raw_out(i);
      stack.set_index(i+1);
      if (!visited.test_set(u->_idx)) {
        stack.push(u, 0);
      }
    } else {
      stack.pop();
      if (n->is_If() && is_heap_stable_test(n)) {
        heap_stable_tests.push(n);
      }
    }
  } while (stack.size() > 0);

  // First pass: merge tests that are back to back.
  for (uint i = 0; i < heap_stable_tests.size(); i++) {
    Node* n = heap_stable_tests.at(i);
    assert(is_heap_stable_test(n), "only evacuation test");
    merge_back_to_back_tests(n, phase);
  }

  if (!phase->C->major_progress()) {
    VectorSet seen;
    for (uint i = 0; i < heap_stable_tests.size(); i++) {
      Node* n = heap_stable_tests.at(i);
      IdealLoopTree* loop = phase->get_loop(n);
      // Only innermost, reducible loops (not the root) are considered.
      if (loop != phase->ltree_root() &&
          loop->_child == nullptr &&
          !loop->_irreducible) {
        Node* head = loop->_head;
        // Visit each loop head only once.
        if (head->is_Loop() &&
            (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
            !seen.test_set(head->_idx)) {
          IfNode* iff = find_unswitching_candidate(loop, phase);
          if (iff != nullptr) {
            // Remember the original condition so it can be restored below.
            Node* bol = iff->in(1);
            if (head->as_Loop()->is_strip_mined()) {
              head->as_Loop()->verify_strip_mined(0);
            }
            // Hoist the gc state test above the loop so unswitching can
            // version the loop on it.
            move_gc_state_test_out_of_loop(iff, phase);

            AutoNodeBudget node_budget(phase);

            if (loop->policy_unswitching(phase)) {
              if (head->as_Loop()->is_strip_mined()) {
                OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
                hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
              }
              phase->do_unswitching(loop, old_new);
            } else {
              // Not proceeding with unswitching. Move load back in
              // the loop.
              phase->igvn().replace_input_of(iff, 1, bol);
            }
          }
        }
      }
    }
  }
}
1926
// An IU barrier node wraps a value about to be stored. The node registers
// itself with the barrier set's compiler state so that later phases can find
// all IU barriers (e.g. to expand or eliminate them).
ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
  ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
}
1930
1931 const Type* ShenandoahIUBarrierNode::bottom_type() const {
1932 if (in(1) == nullptr || in(1)->is_top()) {
1933 return Type::TOP;
1934 }
1935 const Type* t = in(1)->bottom_type();
1936 if (t == TypePtr::NULL_PTR) {
1937 return t;
1938 }
1939 return t->is_oopptr();
1940 }
1941
1942 const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
1943 if (in(1) == nullptr) {
1944 return Type::TOP;
1945 }
1946 const Type* t = phase->type(in(1));
1947 if (t == Type::TOP) {
1948 return Type::TOP;
1949 }
1950 if (t == TypePtr::NULL_PTR) {
1951 return t;
1952 }
1953 return t->is_oopptr();
1954 }
1955
1956 int ShenandoahIUBarrierNode::needed(Node* n) {
1957 if (n == nullptr ||
1958 n->is_Allocate() ||
1959 n->Opcode() == Op_ShenandoahIUBarrier ||
1960 n->bottom_type() == TypePtr::NULL_PTR ||
1961 (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
1962 return NotNeeded;
1963 }
1964 if (n->is_Phi() ||
1965 n->is_CMove()) {
1966 return MaybeNeeded;
1967 }
1968 return Needed;
1969 }
1970
1971 Node* ShenandoahIUBarrierNode::next(Node* n) {
1972 for (;;) {
1973 if (n == nullptr) {
1974 return n;
1975 } else if (n->bottom_type() == TypePtr::NULL_PTR) {
1976 return n;
1977 } else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
1978 return n;
1979 } else if (n->is_ConstraintCast() ||
1980 n->Opcode() == Op_DecodeN ||
1981 n->Opcode() == Op_EncodeP) {
1982 n = n->in(1);
1983 } else if (n->is_Proj()) {
1984 n = n->in(0);
1985 } else {
1986 return n;
1987 }
1988 }
1989 ShouldNotReachHere();
1990 return nullptr;
1991 }
1992
// Try to prove this IU barrier redundant and replace it with its input.
// Looks through pass-through nodes (next()); for phis and CMoves, explores
// all their inputs with a worklist. The barrier can be removed only if every
// reachable input is classified NotNeeded (see needed()).
Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
  PhaseIterGVN* igvn = phase->is_IterGVN();

  Node* n = next(in(1));

  int cont = needed(n);

  if (cont == NotNeeded) {
    // Barrier proven redundant: collapse to the input value.
    return in(1);
  } else if (cont == MaybeNeeded) {
    if (igvn == nullptr) {
      // During parse-time GVN the graph may still be incomplete:
      // retry the analysis during igvn.
      phase->record_for_igvn(this);
      return this;
    } else {
      ResourceMark rm;
      Unique_Node_List wq;
      uint wq_i = 0;

      for (;;) {
        // Expand the current MaybeNeeded node into its value inputs.
        if (n->is_Phi()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != nullptr) {
              wq.push(m);
            }
          }
        } else {
          assert(n->is_CMove(), "nothing else here");
          Node* m = n->in(CMoveNode::IfFalse);
          wq.push(m);
          m = n->in(CMoveNode::IfTrue);
          wq.push(m);
        }
        Node* orig_n = nullptr;
        do {
          if (wq_i >= wq.size()) {
            // Worklist exhausted without finding a Needed input:
            // the barrier is redundant.
            return in(1);
          }
          n = wq.at(wq_i);
          wq_i++;
          orig_n = n;
          n = next(n);
          cont = needed(n);
          if (cont == Needed) {
            return this;
          }
          // Keep pulling from the worklist while entries are NotNeeded, or
          // resolve (via next()) to nodes already queued for expansion.
        } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
      }
    }
  }

  return this;
}
2046
2047 #ifdef ASSERT
2048 static bool has_never_branch(Node* root) {
2049 for (uint i = 1; i < root->req(); i++) {
2050 Node* in = root->in(i);
2051 if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
2052 return true;
2053 }
2054 }
2055 return false;
2056 }
2057 #endif
2058
// Build _memory_nodes: for every CFG node, the memory state (for _alias)
// live at that point. First walk the memory graph from the root to record
// memory-producing nodes at their control; then iterate the CFG in reverse
// post order, propagating states and creating memory phis at regions until a
// fixed point; finally, reconcile pre-existing memory phis with the computed
// states.
void MemoryGraphFixer::collect_memory_nodes() {
  Node_Stack stack(0);
  VectorSet visited;
  Node_List regions;

  // Walk the raw memory graph and create a mapping from CFG node to
  // memory node. Exclude phis for now.
  stack.push(_phase->C->root(), 1);
  do {
    Node* n = stack.node();
    int opc = n->Opcode();
    uint i = stack.index();
    if (i < n->req()) {
      Node* mem = nullptr;
      if (opc == Op_Root) {
        // At the root, find the memory state flowing into each exit point.
        Node* in = n->in(i);
        int in_opc = in->Opcode();
        if (in_opc == Op_Return || in_opc == Op_Rethrow) {
          mem = in->in(TypeFunc::Memory);
        } else if (in_opc == Op_Halt) {
          if (in->in(0)->is_Region()) {
            Node* r = in->in(0);
            for (uint j = 1; j < r->req(); j++) {
              assert(!r->in(j)->is_NeverBranch(), "");
            }
          } else {
            Node* proj = in->in(0);
            assert(proj->is_Proj(), "");
            Node* in = proj->in(0); // shadows outer 'in': node above the Halt's projection
            assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
            if (in->is_CallStaticJava()) {
              mem = in->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_Catch) {
              Node* call = in->in(0)->in(0);
              assert(call->is_Call(), "");
              mem = call->in(TypeFunc::Memory);
            } else if (in->is_NeverBranch()) {
              mem = collect_memory_for_infinite_loop(in);
            }
          }
        } else {
#ifdef ASSERT
          n->dump();
          in->dump();
#endif
          ShouldNotReachHere();
        }
      } else {
        // Non-root stack entries are memory phis pushed below: take the
        // next phi input as the chain to follow.
        assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
        assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
        mem = n->in(i);
      }
      i++;
      stack.set_index(i);
      if (mem == nullptr) {
        continue;
      }
      // Follow the memory chain upwards until a visited node or Start,
      // pushing multi-input nodes so their other inputs get visited too.
      for (;;) {
        if (visited.test_set(mem->_idx) || mem->is_Start()) {
          break;
        }
        if (mem->is_Phi()) {
          stack.push(mem, 2);
          mem = mem->in(1);
        } else if (mem->is_Proj()) {
          stack.push(mem, mem->req());
          mem = mem->in(0);
        } else if (mem->is_SafePoint() || mem->is_MemBar()) {
          mem = mem->in(TypeFunc::Memory);
        } else if (mem->is_MergeMem()) {
          MergeMemNode* mm = mem->as_MergeMem();
          mem = mm->memory_at(_alias);
        } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
          assert(_alias == Compile::AliasIdxRaw, "");
          stack.push(mem, mem->req());
          mem = mem->in(MemNode::Memory);
        } else {
#ifdef ASSERT
          mem->dump();
#endif
          ShouldNotReachHere();
        }
      }
    } else {
      // All inputs processed: record the memory node at its control
      // (phis are handled by the propagation pass below).
      if (n->is_Phi()) {
        // Nothing
      } else if (!n->is_Root()) {
        Node* c = get_ctrl(n);
        _memory_nodes.map(c->_idx, n);
      }
      stack.pop();
    }
  } while(stack.is_nonempty());

  // Iterate over CFG nodes in rpo and propagate memory state to
  // compute memory state at regions, creating new phis if needed.
  Node_List rpo_list;
  visited.clear();
  _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
  Node* root = rpo_list.pop();
  assert(root == _phase->C->root(), "");

  const bool trace = false;
#ifdef ASSERT
  if (trace) {
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);
      if (_memory_nodes[c->_idx] != nullptr) {
        tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
      }
    }
  }
#endif
  // Nodes with _idx >= last are phis created by this pass.
  uint last = _phase->C->unique();

#ifdef ASSERT
  uint16_t max_depth = 0;
  for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    max_depth = MAX2(max_depth, lpt->_nest);
  }
#endif

  bool progress = true;
  int iteration = 0;
  Node_List dead_phis;
  // Fixed point iteration; the iteration count is expected to be bounded by
  // the loop nesting depth (plus slack) except for irreducible or infinite
  // loops.
  while (progress) {
    progress = false;
    iteration++;
    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
    if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }

    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);

      Node* prev_mem = _memory_nodes[c->_idx];
      if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
        Node* prev_region = regions[c->_idx];
        // 'unique' becomes the single incoming memory state, or NodeSentinel
        // if several distinct states merge at this region.
        Node* unique = nullptr;
        for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
          Node* m = _memory_nodes[c->in(j)->_idx];
          assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
          if (m != nullptr) {
            if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
              assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
              // continue
            } else if (unique == nullptr) {
              unique = m;
            } else if (m == unique) {
              // continue
            } else {
              unique = NodeSentinel;
            }
          }
        }
        assert(unique != nullptr, "empty phi???");
        if (unique != NodeSentinel) {
          // Single incoming state: a phi created earlier is now useless.
          if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
            dead_phis.push(prev_region);
          }
          regions.map(c->_idx, unique);
        } else {
          // Several states merge here: reuse or create a memory phi.
          Node* phi = nullptr;
          if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
            // Phi created by a previous iteration of this pass: refresh its inputs.
            phi = prev_region;
            for (uint k = 1; k < c->req(); k++) {
              Node* m = _memory_nodes[c->in(k)->_idx];
              assert(m != nullptr, "expect memory state");
              phi->set_req(k, m);
            }
          } else {
            // Look for a pre-existing phi that already merges exactly these states.
            for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
              Node* u = c->fast_out(j);
              if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
                  (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
                phi = u;
                for (uint k = 1; k < c->req() && phi != nullptr; k++) {
                  Node* m = _memory_nodes[c->in(k)->_idx];
                  assert(m != nullptr, "expect memory state");
                  if (u->in(k) != m) {
                    phi = NodeSentinel;
                  }
                }
              }
            }
            if (phi == NodeSentinel) {
              // No suitable existing phi: create a fresh one.
              phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
              for (uint k = 1; k < c->req(); k++) {
                Node* m = _memory_nodes[c->in(k)->_idx];
                assert(m != nullptr, "expect memory state");
                phi->init_req(k, m);
              }
            }
          }
          if (phi != nullptr) {
            regions.map(c->_idx, phi);
          } else {
            assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
          }
        }
        Node* current_region = regions[c->_idx];
        if (current_region != prev_region) {
          progress = true;
          if (prev_region == prev_mem) {
            _memory_nodes.map(c->_idx, current_region);
          }
        }
      } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
        // Non-region (or skipped region) with no local state: inherit the
        // memory state from the immediate dominator.
        Node* m = _memory_nodes[_phase->idom(c)->_idx];
        assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
        if (m != prev_mem) {
          _memory_nodes.map(c->_idx, m);
          progress = true;
        }
      }
#ifdef ASSERT
      if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
#endif
    }
  }

  // Replace existing phi with computed memory state for that region
  // if different (could be a new phi or a dominating memory node if
  // that phi was found to be useless).
  while (dead_phis.size() > 0) {
    Node* n = dead_phis.pop();
    n->replace_by(_phase->C->top());
    n->destruct(&_phase->igvn());
  }
  // Register the phis created above with the loop optimizer.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
      if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
        _phase->register_new_node(n, c);
      }
    }
  }
  // Rewire or replace other memory phis at each region with the computed state.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
            u != n) {
          assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
          if (u->adr_type() == TypePtr::BOTTOM) {
            // Wide (all-memory) phi: only rewire uses at our alias.
            fix_memory_uses(u, n, n, c);
          } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
            // Phi at our alias is superseded by the computed state.
            _phase->lazy_replace(u, n);
            --i; --imax;
          }
        }
      }
    }
  }
}
2319
// Find the raw memory state at the head of an infinite loop whose NeverBranch
// exit is 'in'. Prefer a memory phi on the loop head; if none exists, walk
// control backwards through the loop body until a safepoint supplies the
// memory state.
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
  Node* mem = nullptr;
  Node* head = in->in(0);
  assert(head->is_Region(), "unexpected infinite loop graph shape");

  // Look for a memory phi at the loop head: one at exactly our alias wins
  // over a bottom (all-memory) phi.
  Node* phi_mem = nullptr;
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* u = head->fast_out(j);
    if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
      if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
        phi_mem = u;
      } else if (u->adr_type() == TypePtr::BOTTOM) {
        assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
        if (phi_mem == nullptr) {
          phi_mem = u;
        }
      }
    }
  }
  if (phi_mem == nullptr) {
    // No phi: search the loop body for a safepoint whose memory input gives
    // the state at the loop head.
    ResourceMark rm;
    Node_Stack stack(0);
    stack.push(head, 1);
    do {
      Node* n = stack.node();
      uint i = stack.index();
      if (i >= n->req()) {
        stack.pop();
      } else {
        stack.set_index(i + 1);
        Node* c = n->in(i);
        assert(c != head, "should have found a safepoint on the way");
        // Only follow paths that stay within the loop (dominated by head),
        // except when starting out from the head itself.
        if (stack.size() != 1 || _phase->is_dominator(head, c)) {
          for (;;) {
            if (c->is_Region()) {
              stack.push(c, 1);
              break;
            } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
              Node* m = c->in(TypeFunc::Memory);
              if (m->is_MergeMem()) {
                m = m->as_MergeMem()->memory_at(_alias);
              }
              assert(mem == nullptr || mem == m, "several memory states");
              mem = m;
              break;
            } else {
              assert(c != c->in(0), "");
              c = c->in(0);
            }
          }
        }
      }
    } while (stack.size() > 0);
    assert(mem != nullptr, "should have found safepoint");
  } else {
    mem = phi_mem;
  }
  return mem;
}
2380
2381 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2382 Node* c = _phase->get_ctrl(n);
2383 if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
2384 assert(c == n->in(0), "");
2385 CallNode* call = c->as_Call();
2386 CallProjections projs;
2387 call->extract_projections(&projs, true, false);
2388 if (projs.catchall_memproj != nullptr) {
2389 if (projs.fallthrough_memproj == n) {
2390 c = projs.fallthrough_catchproj;
2391 } else {
2392 assert(projs.catchall_memproj == n, "");
2393 c = projs.catchall_catchproj;
2394 }
2395 }
2396 }
2397 return c;
2398 }
2399
2400 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2401 if (_phase->has_ctrl(n))
2402 return get_ctrl(n);
2403 else {
2404 assert (n->is_CFG(), "must be a CFG node");
2405 return n;
2406 }
2407 }
2408
2409 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2410 return m != nullptr && get_ctrl(m) == c;
2411 }
2412
// Return the memory state (for our alias) valid at control 'ctrl' for use by
// node 'n' (may be null). Walks up the dominator tree from 'ctrl' until a
// valid memory state is found, then steps over memory nodes at the same
// control that 'n' does not depend on.
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
  assert(!ctrl->is_Call() || ctrl == n, "projection expected");
#ifdef ASSERT
  // A call with multiple memory projections would make "the" memory state
  // right after the call ambiguous: check that cannot be the case here.
  if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
      (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
    CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
    int mems = 0;
    for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
      Node* u = call->fast_out(i);
      if (u->bottom_type() == Type::MEMORY) {
        mems++;
      }
    }
    assert(mems <= 1, "No node right after call if multiple mem projections");
  }
#endif
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  // Walk up the dominator tree to the closest control with a valid memory
  // state (special case: a CatchProj whose call produced the state).
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != nullptr && mem_is_valid(mem, c)) {
    // Several memory nodes can share control 'c': step past those that 'n'
    // does not depend on.
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // Stepping moved us off a valid state: resume the dominator walk.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}
2455
2456 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2457 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2458 Node* use = region->fast_out(i);
2459 if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2460 (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2461 return true;
2462 }
2463 }
2464 return false;
2465 }
2466
2467 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2468 assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2469 const bool trace = false;
2470 DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2471 DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2472 GrowableArray<Node*> phis;
2473 if (mem_for_ctrl != mem) {
2474 Node* old = mem_for_ctrl;
2475 Node* prev = nullptr;
2476 while (old != mem) {
2477 prev = old;
2478 if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2479 assert(_alias == Compile::AliasIdxRaw, "");
2480 old = old->in(MemNode::Memory);
2481 } else if (old->Opcode() == Op_SCMemProj) {
2482 assert(_alias == Compile::AliasIdxRaw, "");
2483 old = old->in(0);
2484 } else {
2485 ShouldNotReachHere();
2486 }
2487 }
2488 assert(prev != nullptr, "");
2489 if (new_ctrl != ctrl) {
2490 _memory_nodes.map(ctrl->_idx, mem);
2491 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2492 }
2493 uint input = (uint)MemNode::Memory;
2494 _phase->igvn().replace_input_of(prev, input, new_mem);
2495 } else {
2496 uses.clear();
2497 _memory_nodes.map(new_ctrl->_idx, new_mem);
2498 uses.push(new_ctrl);
2499 for(uint next = 0; next < uses.size(); next++ ) {
2500 Node *n = uses.at(next);
2501 assert(n->is_CFG(), "");
2502 DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2503 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2504 Node* u = n->fast_out(i);
2505 if (!u->is_Root() && u->is_CFG() && u != n) {
2506 Node* m = _memory_nodes[u->_idx];
2507 if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2508 !has_mem_phi(u) &&
2509 u->unique_ctrl_out()->Opcode() != Op_Halt) {
2510 DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2511 DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
2512
2513 if (!mem_is_valid(m, u) || !m->is_Phi()) {
2514 bool push = true;
2515 bool create_phi = true;
2516 if (_phase->is_dominator(new_ctrl, u)) {
2517 create_phi = false;
2518 }
2519 if (create_phi) {
2520 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2521 _phase->register_new_node(phi, u);
2522 phis.push(phi);
2523 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2524 if (!mem_is_valid(m, u)) {
2525 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2526 _memory_nodes.map(u->_idx, phi);
2527 } else {
2528 DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2529 for (;;) {
2530 assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2531 Node* next = nullptr;
2532 if (m->is_Proj()) {
2533 next = m->in(0);
2534 } else {
2535 assert(m->is_Mem() || m->is_LoadStore(), "");
2536 assert(_alias == Compile::AliasIdxRaw, "");
2537 next = m->in(MemNode::Memory);
2538 }
2539 if (_phase->get_ctrl(next) != u) {
2540 break;
2541 }
2542 if (next->is_MergeMem()) {
2543 assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2544 break;
2545 }
2546 if (next->is_Phi()) {
2547 assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2548 break;
2549 }
2550 m = next;
2551 }
2552
2553 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2554 assert(m->is_Mem() || m->is_LoadStore(), "");
2555 uint input = (uint)MemNode::Memory;
2556 _phase->igvn().replace_input_of(m, input, phi);
2557 push = false;
2558 }
2559 } else {
2560 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2561 }
2562 if (push) {
2563 uses.push(u);
2564 }
2565 }
2566 } else if (!mem_is_valid(m, u) &&
2567 !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2568 uses.push(u);
2569 }
2570 }
2571 }
2572 }
2573 for (int i = 0; i < phis.length(); i++) {
2574 Node* n = phis.at(i);
2575 Node* r = n->in(0);
2576 DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2577 for (uint j = 1; j < n->req(); j++) {
2578 Node* m = find_mem(r->in(j), nullptr);
2579 _phase->igvn().replace_input_of(n, j, m);
2580 DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2581 }
2582 }
2583 }
2584 uint last = _phase->C->unique();
2585 MergeMemNode* mm = nullptr;
2586 int alias = _alias;
2587 DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2588 // Process loads first to not miss an anti-dependency: if the memory
2589 // edge of a store is updated before a load is processed then an
2590 // anti-dependency may be missed.
2591 for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2592 Node* u = mem->out(i);
2593 if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2594 Node* m = find_mem(_phase->get_ctrl(u), u);
2595 if (m != mem) {
2596 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2597 _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2598 --i;
2599 }
2600 }
2601 }
2602 for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2603 Node* u = mem->out(i);
2604 if (u->_idx < last) {
2605 if (u->is_Mem()) {
2606 if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2607 Node* m = find_mem(_phase->get_ctrl(u), u);
2608 if (m != mem) {
2609 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2610 _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2611 --i;
2612 }
2613 }
2614 } else if (u->is_MergeMem()) {
2615 MergeMemNode* u_mm = u->as_MergeMem();
2616 if (u_mm->memory_at(alias) == mem) {
2617 MergeMemNode* newmm = nullptr;
2618 for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2619 Node* uu = u->fast_out(j);
2620 assert(!uu->is_MergeMem(), "chain of MergeMems?");
2621 if (uu->is_Phi()) {
2622 assert(uu->adr_type() == TypePtr::BOTTOM, "");
2623 Node* region = uu->in(0);
2624 int nb = 0;
2625 for (uint k = 1; k < uu->req(); k++) {
2626 if (uu->in(k) == u) {
2627 Node* m = find_mem(region->in(k), nullptr);
2628 if (m != mem) {
2629 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2630 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2631 if (newmm != u) {
2632 _phase->igvn().replace_input_of(uu, k, newmm);
2633 nb++;
2634 --jmax;
2635 }
2636 }
2637 }
2638 }
2639 if (nb > 0) {
2640 --j;
2641 }
2642 } else {
2643 Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2644 if (m != mem) {
2645 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2646 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2647 if (newmm != u) {
2648 _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2649 --j, --jmax;
2650 }
2651 }
2652 }
2653 }
2654 }
2655 } else if (u->is_Phi()) {
2656 assert(u->bottom_type() == Type::MEMORY, "what else?");
2657 if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2658 Node* region = u->in(0);
2659 bool replaced = false;
2660 for (uint j = 1; j < u->req(); j++) {
2661 if (u->in(j) == mem) {
2662 Node* m = find_mem(region->in(j), nullptr);
2663 Node* nnew = m;
2664 if (m != mem) {
2665 if (u->adr_type() == TypePtr::BOTTOM) {
2666 mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2667 nnew = mm;
2668 }
2669 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2670 _phase->igvn().replace_input_of(u, j, nnew);
2671 replaced = true;
2672 }
2673 }
2674 }
2675 if (replaced) {
2676 --i;
2677 }
2678 }
2679 } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2680 u->adr_type() == nullptr) {
2681 assert(u->adr_type() != nullptr ||
2682 u->Opcode() == Op_Rethrow ||
2683 u->Opcode() == Op_Return ||
2684 u->Opcode() == Op_SafePoint ||
2685 (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2686 (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2687 u->Opcode() == Op_CallLeaf, "");
2688 Node* m = find_mem(_phase->ctrl_or_self(u), u);
2689 if (m != mem) {
2690 mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2691 _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2692 --i;
2693 }
2694 } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2695 Node* m = find_mem(_phase->ctrl_or_self(u), u);
2696 if (m != mem) {
2697 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2698 _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2699 --i;
2700 }
2701 } else if (u->adr_type() != TypePtr::BOTTOM &&
2702 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2703 Node* m = find_mem(_phase->ctrl_or_self(u), u);
2704 assert(m != mem, "");
2705 // u is on the wrong slice...
2706 assert(u->is_ClearArray(), "");
2707 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2708 _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2709 --i;
2710 }
2711 }
2712 }
2713 #ifdef ASSERT
2714 assert(new_mem->outcnt() > 0, "");
2715 for (int i = 0; i < phis.length(); i++) {
2716 Node* n = phis.at(i);
2717 assert(n->outcnt() > 0, "new phi must have uses now");
2718 }
2719 #endif
2720 }
2721
2722 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2723 if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2724 _memory_nodes.map(ctrl->_idx, mem);
2725 _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2726 }
2727 }
2728
2729 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2730 MergeMemNode* mm = MergeMemNode::make(mem);
2731 mm->set_memory_at(_alias, rep_proj);
2732 _phase->register_new_node(mm, rep_ctrl);
2733 return mm;
2734 }
2735
// Produce a MergeMem based on 'u' whose '_alias' slice is 'rep_proj'
// instead of 'mem'. When 'u' has a single use it is updated in place,
// otherwise a clone is built. 'rep_ctrl' is where the replacement
// projection is available. 'i' is the caller's DUIterator over mem's
// uses; it is stepped back when an edge from u to mem is removed.
// Returns the MergeMem carrying the new slice (possibly 'u' itself).
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = nullptr;
  MergeMemNode* u_mm = u->as_MergeMem();
  // Place the result at the deeper of u's control and rep_ctrl.
  Node* c = _phase->get_ctrl(u);
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: safe to rewrite u in place rather than clone it.
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      // Replacing this input removes a u->mem edge, which perturbs the
      // caller's iteration over mem's uses; compensate.
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    // If u was too narrow to hold the tracked slice, set it explicitly
    // on the clone.
    if ((uint)_alias >= u->req()) {
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}
2780
2781 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2782 if (phi->adr_type() == TypePtr::BOTTOM) {
2783 Node* region = phi->in(0);
2784 for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2785 Node* uu = region->fast_out(j);
2786 if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2787 return false;
2788 }
2789 }
2790 return true;
2791 }
2792 return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2793 }
2794
// Walk all uses of the memory state 'mem' and rewire those dominated by
// 'rep_ctrl' to consume 'rep_proj' (the memory output of 'replacement')
// instead, adjusting MergeMems and memory Phis so consumers see the
// state produced by the newly inserted node. Nodes created during this
// pass (idx >= last) are skipped.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase->C->unique();
  MergeMemNode* mm = nullptr; // lazily built MergeMem for all-of-memory uses
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = nullptr;
          // Decide per use of the MergeMem whether the rewired state
          // applies on that path.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                // Only rewire Phi inputs whose incoming control path is
                // dominated by rep_ctrl.
                for (uint k = 1; k < uu->req(); k++) {
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == nullptr) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      // Edge u->uu is removed; shrink the iteration bound.
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == nullptr) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // A bottom-memory Phi needs all slices: feed it a
                // MergeMem wrapping rep_proj (created once, reused).
                if (mm == nullptr) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            // Edges from mem were removed; compensate the outer iterator.
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == nullptr) {
        assert(u->adr_type() != nullptr ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "%s", u->Name());
        // Uses of all of memory (returns, safepoints, calls...) consume
        // a MergeMem carrying the updated slice.
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == nullptr) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        // A use on exactly the tracked slice can consume rep_proj directly.
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}
2890
// Construct a load reference barrier over 'obj' (input ValueIn) under
// control 'ctrl'. 'decorators' records the access kind (strength etc.).
ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
  : Node(ctrl, obj), _decorators(decorators) {
  // Register with the barrier set's compiler state so the barrier is
  // found and expanded later in the C2 pipeline.
  ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
}
2895
// Accessor for the decorator set recorded at construction.
DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
  return _decorators;
}
2899
// Node size override: required because this node carries extra state
// (_decorators) beyond the base Node layout.
uint ShenandoahLoadReferenceBarrierNode::size_of() const {
  return sizeof(*this);
}
2903
// Keep only the decorator bits that matter for barrier identity
// (reference strength and native/heap location). Used by hash()/cmp()
// below so GVN ignores other decorator bits when commoning barriers.
static DecoratorSet mask_decorators(DecoratorSet decorators) {
  return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
}
2907
2908 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2909 uint hash = Node::hash();
2910 hash += mask_decorators(_decorators);
2911 return hash;
2912 }
2913
2914 bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
2915 return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2916 mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2917 }
2918
2919 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2920 if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2921 return Type::TOP;
2922 }
2923 const Type* t = in(ValueIn)->bottom_type();
2924 if (t == TypePtr::NULL_PTR) {
2925 return t;
2926 }
2927
2928 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2929 return t;
2930 }
2931
2932 return t->meet(TypePtr::NULL_PTR);
2933 }
2934
2935 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2936 // Either input is TOP ==> the result is TOP
2937 const Type *t2 = phase->type(in(ValueIn));
2938 if( t2 == Type::TOP ) return Type::TOP;
2939
2940 if (t2 == TypePtr::NULL_PTR) {
2941 return t2;
2942 }
2943
2944 if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2945 return t2;
2946 }
2947
2948 return t2->meet(TypePtr::NULL_PTR);
2949 }
2950
2951 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2952 Node* value = in(ValueIn);
2953 if (!needs_barrier(phase, value)) {
2954 return value;
2955 }
2956 return this;
2957 }
2958
// Entry point for the barrier-elision analysis: returns true if value
// 'n' may require a load reference barrier. Delegates to the recursive
// needs_barrier_impl() with a fresh visited set for cycle detection.
bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
  Unique_Node_List visited;
  return needs_barrier_impl(phase, n, visited);
}
2963
// Recursive helper for needs_barrier(): walks the def chain of 'n'
// (through Phis, casts, CMoves...) and decides whether the value may
// need a load reference barrier. 'visited' breaks cycles; a revisited
// node contributes false (no new reason for a barrier). Conservatively
// returns true for node shapes not explicitly handled.
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
  if (n == nullptr) return false;
  if (visited.member(n)) {
    return false; // Been there.
  }
  visited.push(n);

  if (n->is_Allocate()) {
    // Result of an allocation: no barrier needed.
    // tty->print_cr("optimize barrier on alloc");
    return false;
  }
  if (n->is_Call()) {
    // Call results: no barrier needed.
    // tty->print_cr("optimize barrier on call");
    return false;
  }

  const Type* type = phase->type(n);
  if (type == Type::TOP) {
    return false;
  }
  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
    // Provably-null values need no barrier.
    // tty->print_cr("optimize barrier on null");
    return false;
  }
  if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
    // Constant oops need no barrier.
    // tty->print_cr("optimize barrier on constant");
    return false;
  }

  switch (n->Opcode()) {
    case Op_AddP:
      return true; // TODO: Can refine?
    case Op_LoadP:
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
      // Values read from the heap: barrier required.
      return true;
    case Op_Phi: {
      // A Phi needs a barrier if any of its inputs does.
      for (uint i = 1; i < n->req(); i++) {
        if (needs_barrier_impl(phase, n->in(i), visited)) return true;
      }
      return false;
    }
    case Op_CheckCastPP:
    case Op_CastPP:
      // Casts are transparent: look through to the casted value.
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_Proj:
      return needs_barrier_impl(phase, n->in(0), visited);
    case Op_ShenandoahLoadReferenceBarrier:
      // Already barriered: no second barrier needed.
      // tty->print_cr("optimize barrier on barrier");
      return false;
    case Op_Parm:
      // tty->print_cr("optimize barrier on input arg");
      return false;
    case Op_DecodeN:
    case Op_EncodeP:
      // Narrow-oop encode/decode is transparent.
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_LoadN:
      return true;
    case Op_CMoveN:
    case Op_CMoveP:
      // Either selected value may need the barrier.
      return needs_barrier_impl(phase, n->in(2), visited) ||
             needs_barrier_impl(phase, n->in(3), visited);
    case Op_ShenandoahIUBarrier:
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_CreateEx:
      return false;
    default:
      break;
  }
#ifdef ASSERT
  // Unhandled node shape: dump context and fail loudly in debug builds;
  // product builds conservatively keep the barrier (return true below).
  tty->print("need barrier on?: ");
  tty->print_cr("ins:");
  n->dump(2);
  tty->print_cr("outs:");
  n->dump(-2);
  ShouldNotReachHere();
#endif
  return true;
}