/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

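// Create an ArrayCopyNode for the given copy arguments and wire up its
// fixed inputs. If the copy may throw, the node also gets the current
// I/O state and safepoint (debug) edges.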
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O, kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

void ArrayCopyNode::connect_outputs(GraphKit* kit, bool deoptimize_on_exception) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true, deoptimize_on_exception);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

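// Return the constant length of the copy, or -1 if it is not a constant.
// Note that a basic clone passes its length in words (an intptr_t), while
// the other kinds pass an element count (an int).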
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

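// Return the number of elements (or, for an instance clone, the number of
// instance fields) that this copy transfers, or -1 if unknown.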
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // A clone passes its length as a rounded number of longs. If we're
      // cloning an array, we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be too. Note that the opposite need not hold, because
      // different input array lengths (e.g. int arrays with
      // 3 or 4 elements) might lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}

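// Try to expand a basic instance clone into a sequence of per-field loads
// and stores. Returns nullptr if this is not such a clone, the new memory
// state on success, and NodeSentinel if the transform failed and the node
// should be left alone.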
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }

  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset_in_bytes());
    Node* next_src = phase->transform(new AddPNode(base_src, base_src, off));
    Node* next_dest = phase->transform(new AddPNode(base_dest, base_dest, off));
    assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_src)->isa_ptr()),
           "slice of address and input slice don't match");
    assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_dest)->isa_ptr()),
           "slice of address and input slice don't match");
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  base_src = in(ArrayCopyNode::Src);
  base_dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(base_src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object.
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if the arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if the arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

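    // Give dest_offset a temporary use so that it is not treated as a dead
    // node and reclaimed while src_scale is transformed below (it has no
    // other users yet at this point).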
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) {
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would
    // start. If the clone copy is decomposed into load-stores, the address
    // is adjusted to point at where the array starts.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}

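// Decide whether the expanded copy must run forward, backward, or both:
// if the bases may alias and more than one element is copied, emit a
// runtime test on the offsets. When src_offset < dest_offset, an
// overlapping forward copy could overwrite source elements that have not
// been read yet, so that case takes the backward control path.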
void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node* bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}

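// Emit the expanded copy as a sequence of per-element loads and stores in
// ascending address order on the forward control path.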
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        Node* mem,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    MergeMemNode* mm = MergeMemNode::make(mem);

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

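// Same as above, but in descending address order, for the control path on
// which the source and destination ranges may overlap (src_offset <
// dest_offset): copying the last element first ensures no source element
// is overwritten before it has been read.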
Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         Node* mem,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    MergeMemNode* mm = MergeMemNode::make(mem);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
      for (int i = count-1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}

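// Hook the expanded copy back into the graph: replace the fallthrough
// projections of the ArrayCopyNode with the new control and memory state.
// Returns false if the expansion has to be abandoned.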
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the
      // new memory and control and with the input I/O.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace the current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if this is a small array copy that we can inline as
  // loads/stores. Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  //   need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Src) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, _src_type, src);
  const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
  Node* in_mem = in(TypeFunc::Memory);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = nullptr;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

bool ArrayCopyNode::may_modify_helper(const TypeOopPtr* t_oop, Node* n, PhaseValues* phase, ArrayCopyNode*& ac) {
  if (n != nullptr &&
      n->is_ArrayCopy() &&
      n->as_ArrayCopy()->may_modify(t_oop, phase)) {
    ac = n->as_ArrayCopy();
    return true;
  }
  return false;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac) {
  if (mb->trailing_expanded_array_copy()) {
    return true;
  }

  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Step over the G1 GC barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = nullptr;
  guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != nullptr) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, ac)) {
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, ac)) {
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi in
// the destination array?
// If must_modify is false, return true if the copy could write between
// offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to write
// between offset_lo and offset_hi.
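// For example (a sketch, assuming a 16-byte array header): for an int[]
// destination with a constant dest_pos of 2 and a constant length of 3,
// the copy writes bytes [24, 36), so it must modify offsets 24..35 and
// cannot modify any offset outside that range.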
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimal vector size for a copy whose length is known at compile time.
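// For example, for T_INT (4-byte elements) a constant length of 6 gives
// 24 bytes, which falls in the (16, 32] bucket, so lane_count = 32/4 = 8.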
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    else if (size_in_bytes > 16 && size_in_bytes <= 32)
      lane_count = 32/type2aelembytes(type);
  }
  return lane_count;
}