1 /*
2 * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciFlatArrayKlass.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "gc/shared/c2/cardTableBarrierSetC2.hpp"
29 #include "gc/shared/gc_globals.hpp"
30 #include "opto/arraycopynode.hpp"
31 #include "opto/graphKit.hpp"
32 #include "opto/inlinetypenode.hpp"
33 #include "utilities/powerOfTwo.hpp"
34
35 const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;
36
37 ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
38 : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
39 _kind(None),
40 _alloc_tightly_coupled(alloc_tightly_coupled),
41 _has_negative_length_guard(has_negative_length_guard),
42 _arguments_validated(false),
43 _src_type(TypeOopPtr::BOTTOM),
44 _dest_type(TypeOopPtr::BOTTOM) {
45 init_class_id(Class_ArrayCopy);
46 init_flags(Flag_is_macro);
47 C->add_macro_node(this);
48 }
49
50 uint ArrayCopyNode::size_of() const { return sizeof(*this); }
51
52 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
53 Node* src, Node* src_offset,
54 Node* dest, Node* dest_offset,
55 Node* length,
56 bool alloc_tightly_coupled,
57 bool has_negative_length_guard,
58 Node* src_klass, Node* dest_klass,
59 Node* src_length, Node* dest_length) {
60
61 ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
62 kit->set_predefined_input_for_runtime_call(ac);
63
64 ac->init_req(ArrayCopyNode::Src, src);
65 ac->init_req(ArrayCopyNode::SrcPos, src_offset);
66 ac->init_req(ArrayCopyNode::Dest, dest);
67 ac->init_req(ArrayCopyNode::DestPos, dest_offset);
68 ac->init_req(ArrayCopyNode::Length, length);
69 ac->init_req(ArrayCopyNode::SrcLen, src_length);
70 ac->init_req(ArrayCopyNode::DestLen, dest_length);
71 ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
72 ac->init_req(ArrayCopyNode::DestKlass, dest_klass);
73
74 if (may_throw) {
    ac->set_req(TypeFunc::I_O, kit->i_o());
76 kit->add_safepoint_edges(ac, false);
77 }
78
79 return ac;
80 }
81
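// Hook up the ArrayCopyNode's control, i/o and memory outputs in the kit and
// set up the exception path (Throwable) for the slow call.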
82 void ArrayCopyNode::connect_outputs(GraphKit* kit, bool deoptimize_on_exception) {
83 kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
85 kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
86 kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true, deoptimize_on_exception);
87 kit->set_all_memory_call(this);
88 }
89
90 #ifndef PRODUCT
91 const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};
92
93 void ArrayCopyNode::dump_spec(outputStream *st) const {
94 CallNode::dump_spec(st);
95 st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
96 }
97
98 void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
99 st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
100 }
101 #endif
102
103 intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // Return the length if it is a constant, -1 otherwise
105 Node* length = in(ArrayCopyNode::Length);
106 const Type* length_type = phase->type(length);
107
108 if (length_type == Type::TOP) {
109 return -1;
110 }
111
112 assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");
113
114 return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
115 }
116
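// Return the number of elements (or, for an instance clone, the number of
// non-static fields) that need to be copied, or -1 if that number is not a
// compile-time constant.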
117 int ArrayCopyNode::get_count(PhaseGVN *phase) const {
118 if (is_clonebasic()) {
119 Node* src = in(ArrayCopyNode::Src);
120 const Type* src_type = phase->type(src);
121
122 if (src_type == Type::TOP) {
123 return -1;
124 }
125
126 if (src_type->isa_instptr()) {
127 const TypeInstPtr* inst_src = src_type->is_instptr();
128 ciInstanceKlass* ik = inst_src->instance_klass();
129 // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
130 // fields into account. They are rare anyway so easier to simply
131 // skip instances with injected fields.
132 if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
133 return -1;
134 }
135 int nb_fields = ik->nof_nonstatic_fields();
136 return nb_fields;
137 } else {
138 const TypeAryPtr* ary_src = src_type->isa_aryptr();
139 assert (ary_src != nullptr, "not an array or instance?");
140 // clone passes a length as a rounded number of longs. If we're
141 // cloning an array we'll do it element by element. If the
142 // length of the input array is constant, ArrayCopyNode::Length
143 // must be too. Note that the opposite does not need to hold,
144 // because different input array lengths (e.g. int arrays with
145 // 3 or 4 elements) might lead to the same length input
146 // (e.g. 2 double-words).
147 assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
148 (UseArrayFlattening && ary_src->elem()->make_oopptr() != nullptr && ary_src->elem()->make_oopptr()->can_be_inline_type()) ||
149 phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
150 if (ary_src->size()->is_con()) {
151 return ary_src->size()->get_con();
152 }
153 return -1;
154 }
155 }
156
157 return get_length_if_constant(phase);
158 }
159
160 Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
161 // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
162 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
163 // above runtime checks that guarantee it is within bounds.
164 DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
165 C2AccessValuePtr addr(adr, adr_type);
166 C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
167 Node* res = bs->load_at(access, type);
168 ctl = access.ctl();
169 return res;
170 }
171
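// Emit a barrier-aware store of one element or field of the copy. When the
// destination is a tightly coupled allocation, the access is decorated with
// C2_TIGHTLY_COUPLED_ALLOC so the barrier set can optimize write barriers for
// the newly allocated object where that is legal.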
172 void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
173 DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
174 if (is_alloc_tightly_coupled()) {
175 decorators |= C2_TIGHTLY_COUPLED_ALLOC;
176 }
177 C2AccessValuePtr addr(adr, adr_type);
178 C2AccessValue value(val, type);
179 C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
180 bs->store_at(access, value);
181 ctl = access.ctl();
182 }
183
184
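// Attempt to replace a basic instance clone with a sequence of field loads and
// stores. Returns the new memory state on success, nullptr if this copy is not
// an instance clone, and NodeSentinel if the transformation has to be given up.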
185 Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
186 if (!is_clonebasic()) {
187 return nullptr;
188 }
189
190 Node* base_src = in(ArrayCopyNode::Src);
191 Node* base_dest = in(ArrayCopyNode::Dest);
192 Node* ctl = in(TypeFunc::Control);
193 Node* in_mem = in(TypeFunc::Memory);
194
195 const Type* src_type = phase->type(base_src);
196 const TypeInstPtr* inst_src = src_type->isa_instptr();
197 if (inst_src == nullptr) {
198 return nullptr;
199 }
200
201 MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
202 phase->record_for_igvn(mem);
203 if (can_reshape) {
204 phase->is_IterGVN()->_worklist.push(mem);
205 }
206
207
208 ciInstanceKlass* ik = inst_src->instance_klass();
209
210 if (!inst_src->klass_is_exact()) {
211 assert(!ik->is_interface(), "inconsistent klass hierarchy");
212 if (ik->has_subklass()) {
213 // Concurrent class loading.
214 // Fail fast and return NodeSentinel to indicate that the transform failed.
215 return NodeSentinel;
216 } else {
217 phase->C->dependencies()->assert_leaf_type(ik);
218 }
219 }
220
221 const TypeInstPtr* dest_type = phase->type(base_dest)->is_instptr();
222 if (dest_type->instance_klass() != ik) {
223 // At parse time, the exact type of the object to clone was not known. That inexact type was captured by the CheckCastPP
224 // of the newly allocated cloned object (in dest). The exact type is now known (in src), but the type for the cloned object
225 // (dest) was not updated. When copying the fields below, Store nodes may write to offsets for fields that don't exist in
226 // the inexact class. The stores would then be assigned an incorrect slice.
227 return NodeSentinel;
228 }
229
230 assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
231
232 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
233 for (int i = 0; i < count; i++) {
234 ciField* field = ik->nonstatic_field_at(i);
235 const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
236 Node* off = phase->MakeConX(field->offset_in_bytes());
237 Node* next_src = phase->transform(AddPNode::make_with_base(base_src, off));
238 Node* next_dest = phase->transform(AddPNode::make_with_base(base_dest, off));
239 assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_src)->isa_ptr()),
240 "slice of address and input slice don't match");
241 assert(phase->C->get_alias_index(adr_type) == phase->C->get_alias_index(phase->type(next_dest)->isa_ptr()),
242 "slice of address and input slice don't match");
243 BasicType bt = field->layout_type();
244
245 const Type *type;
246 if (bt == T_OBJECT) {
247 if (!field->type()->is_loaded()) {
248 type = TypeInstPtr::BOTTOM;
249 } else {
250 ciType* field_klass = field->type();
251 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
252 }
253 } else {
254 type = Type::get_const_basic_type(bt);
255 }
256
257 Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
258 store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
259 }
260
261 if (!finish_transform(phase, can_reshape, ctl, mem)) {
262 // Return NodeSentinel to indicate that the transform failed
263 return NodeSentinel;
264 }
265
266 return mem;
267 }
268
// We may have narrowed the type of base because this runs with PhaseIterGVN::_delay_transform set. Explicitly
// update the type of the AddP so it's consistent with its base and so load() picks the right memory slice.
271 Node* ArrayCopyNode::make_and_transform_addp(PhaseGVN* phase, Node* base, Node* offset) {
272 return make_and_transform_addp(phase, base, base, offset);
273 }
274
275 Node* ArrayCopyNode::make_and_transform_addp(PhaseGVN* phase, Node* base, Node* ptr, Node* offset) {
276 assert(phase->is_IterGVN() == nullptr || phase->is_IterGVN()->delay_transform(), "helper method when delay transform is set");
277 Node* addp = phase->transform(AddPNode::make_with_base(base, ptr, offset));
278 phase->set_type(addp, addp->Value(phase));
279 return addp;
280 }
281
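// Compute the base and element addresses of the source and destination, the
// element type to copy, and whether the two bases are known to be disjoint.
// Returns false if the copy cannot be turned into a sequence of loads and stores.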
282 bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
283 Node*& adr_src,
284 Node*& base_src,
285 Node*& adr_dest,
286 Node*& base_dest,
287 BasicType& copy_type,
288 const Type*& value_type,
289 bool& disjoint_bases) {
290 base_src = in(ArrayCopyNode::Src);
291 base_dest = in(ArrayCopyNode::Dest);
292 const Type* src_type = phase->type(base_src);
293 const TypeAryPtr* ary_src = src_type->isa_aryptr();
294
295 Node* src_offset = in(ArrayCopyNode::SrcPos);
296 Node* dest_offset = in(ArrayCopyNode::DestPos);
297
298 if (is_arraycopy() || is_copyofrange() || is_copyof()) {
299 const Type* dest_type = phase->type(base_dest);
300 const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
301
302 // newly allocated object is guaranteed to not overlap with source object
303 disjoint_bases = is_alloc_tightly_coupled();
304 if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
305 ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
306 // We don't know if arguments are arrays
307 return false;
308 }
309
310 BasicType src_elem = ary_src->elem()->array_element_basic_type();
311 BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
312 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
313 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
314
315 // TODO 8251971 What about atomicity?
316 if (src_elem != dest_elem || ary_src->is_null_free() != ary_dest->is_null_free() || ary_src->is_flat() != ary_dest->is_flat() || dest_elem == T_VOID) {
317 // We don't know if arguments are arrays of the same type
318 return false;
319 }
320
321 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
322 if ((!ary_dest->is_flat() && bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) ||
323 (ary_dest->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
324 bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), T_OBJECT, false, false, BarrierSetC2::Optimization))) {
325 // It's an object array copy but we can't emit the card marking that is needed
326 return false;
327 }
328
329 value_type = ary_src->elem();
330
331 uint shift = exact_log2(type2aelembytes(dest_elem));
332 if (ary_dest->is_flat()) {
333 assert(ary_src->is_flat(), "src and dest must be flat");
334 shift = ary_src->flat_log_elem_size();
335 src_elem = T_FLAT_ELEMENT;
336 dest_elem = T_FLAT_ELEMENT;
337 }
338
339 const uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);
340
341 src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
342 if (src_offset->is_top()) {
343 // Offset is out of bounds (the ArrayCopyNode will be removed)
344 return false;
345 }
346 dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
347 if (dest_offset->is_top()) {
348 // Offset is out of bounds (the ArrayCopyNode will be removed)
349 if (can_reshape) {
350 // record src_offset, so it can be deleted later (if it is dead)
351 phase->is_IterGVN()->_worklist.push(src_offset);
352 }
353 return false;
354 }
355
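    // Temporarily hook dest_offset to keep it alive while src_scale is
    // transformed; it may have no other uses at this point.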
356 Node* hook = new Node(1);
357 hook->init_req(0, dest_offset);
358
359 Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
360
361 hook->destruct(phase);
362
363 Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));
364
365 adr_src = make_and_transform_addp(phase, base_src, src_scale);
366 adr_dest = make_and_transform_addp(phase, base_dest, dest_scale);
367
368 adr_src = make_and_transform_addp(phase, base_src, adr_src, phase->MakeConX(header));
369 adr_dest = make_and_transform_addp(phase, base_dest, adr_dest, phase->MakeConX(header));
370 copy_type = dest_elem;
371 } else {
372 assert(ary_src != nullptr, "should be a clone");
373 assert(is_clonebasic(), "should be");
374
375 disjoint_bases = true;
376
377 if (ary_src->elem()->make_oopptr() != nullptr &&
378 ary_src->elem()->make_oopptr()->can_be_inline_type()) {
379 return false;
380 }
381
382 BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
383 if (is_reference_type(elem, true)) {
384 elem = T_OBJECT;
385 }
386
387 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
388 if ((!ary_src->is_flat() && bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) ||
389 (ary_src->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
390 bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization))) {
391 // It's an object array copy but we can't emit the card marking that is needed
392 return false;
393 }
394
395 adr_src = make_and_transform_addp(phase, base_src, src_offset);
396 adr_dest = make_and_transform_addp(phase, base_dest, dest_offset);
397
    // The address is offset to an aligned address where a raw copy would start.
    // If the clone is decomposed into loads/stores, the address is adjusted to
    // point at where the array elements start.
401 const Type* toff = phase->type(src_offset);
402 int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
403 int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
404 assert(diff >= 0, "clone should not start after 1st array element");
405 if (diff > 0) {
406 adr_src = make_and_transform_addp(phase, base_src, adr_src, phase->MakeConX(diff));
407 adr_dest = make_and_transform_addp(phase, base_dest, adr_dest, phase->MakeConX(diff));
408 }
409 copy_type = elem;
410 value_type = ary_src->elem();
411 }
412 return true;
413 }
414
415 const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
416 if (atp == TypeOopPtr::BOTTOM) {
417 atp = phase->type(n)->isa_ptr();
418 }
419 // adjust atp to be the correct array element address type
420 return atp->add_offset(Type::OffsetBot)->is_aryptr();
421 }
422
423 const TypePtr* ArrayCopyNode::get_src_adr_type(PhaseGVN* phase) const {
424 return get_address_type(phase, _src_type, in(Src));
425 }
426
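// If the source and destination may overlap and more than one element is copied,
// emit a runtime check on the offsets: the copy proceeds forward when
// src_offset >= dest_offset and backward otherwise, so that elements are not
// overwritten before they have been read.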
427 void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
428 Node* ctl = kit.control();
429 if (!disjoint_bases && count > 1) {
430 PhaseGVN& gvn = kit.gvn();
431 Node* src_offset = in(ArrayCopyNode::SrcPos);
432 Node* dest_offset = in(ArrayCopyNode::DestPos);
433 assert(src_offset != nullptr && dest_offset != nullptr, "should be");
434 Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
435 Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
436 IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
437
438 gvn.transform(iff);
439
440 kit.set_control(gvn.transform(new IfFalseNode(iff)));
441 backward_ctl = gvn.transform(new IfTrueNode(iff));
442 }
443 }
444
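// Copy element i from the source array to the destination array. For flat
// arrays the element is copied field by field; otherwise a single barrier-aware
// load/store pair is emitted.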
445 void ArrayCopyNode::copy(GraphKit& kit,
446 const TypeAryPtr* atp_src,
447 const TypeAryPtr* atp_dest,
448 int i,
449 Node* base_src,
450 Node* base_dest,
451 Node* adr_src,
452 Node* adr_dest,
453 BasicType copy_type,
454 const Type* value_type) {
455 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
456 Node* ctl = kit.control();
457 if (atp_dest->is_flat()) {
458 ciInlineKlass* vk = atp_src->elem()->inline_klass();
459 for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
460 ciField* field = vk->nonstatic_field_at(j);
461 int off_in_vt = field->offset_in_bytes() - vk->payload_offset();
462 Node* off = kit.MakeConX(off_in_vt + i * atp_src->flat_elem_size());
463 ciType* ft = field->type();
464 BasicType bt = type2field[ft->basic_type()];
465 assert(!field->is_flat(), "flat field encountered");
466 const Type* rt = Type::get_const_type(ft);
467 const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
468 assert(!bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), bt, false, false, BarrierSetC2::Optimization), "GC barriers required");
469 Node* next_src = make_and_transform_addp(&kit.gvn(), base_src, adr_src, off);
470 Node* next_dest = make_and_transform_addp(&kit.gvn(), base_dest, adr_dest, off);
471 Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, adr_type, rt, bt);
472 store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, adr_type, v, rt, bt);
473 }
474 } else {
475 Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
476 Node* next_src = make_and_transform_addp(&kit.gvn(), base_src, adr_src, off);
477 Node* next_dest = make_and_transform_addp(&kit.gvn(), base_dest, adr_dest, off);
478 Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, atp_src, value_type, copy_type);
479 store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, atp_dest, v, value_type, copy_type);
480 }
481 kit.set_control(ctl);
482 }
483
484
485 void ArrayCopyNode::array_copy_forward(GraphKit& kit,
486 bool can_reshape,
487 const TypeAryPtr* atp_src,
488 const TypeAryPtr* atp_dest,
489 Node* adr_src,
490 Node* base_src,
491 Node* adr_dest,
492 Node* base_dest,
493 BasicType copy_type,
494 const Type* value_type,
495 int count) {
496 if (!kit.stopped()) {
497 // copy forward
498 if (count > 0) {
499 for (int i = 0; i < count; i++) {
500 copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
501 }
502 } else if (can_reshape) {
503 PhaseGVN& gvn = kit.gvn();
504 assert(gvn.is_IterGVN(), "");
505 gvn.record_for_igvn(adr_src);
506 gvn.record_for_igvn(adr_dest);
507 }
508 }
509 }
510
511 void ArrayCopyNode::array_copy_backward(GraphKit& kit,
512 bool can_reshape,
513 const TypeAryPtr* atp_src,
514 const TypeAryPtr* atp_dest,
515 Node* adr_src,
516 Node* base_src,
517 Node* adr_dest,
518 Node* base_dest,
519 BasicType copy_type,
520 const Type* value_type,
521 int count) {
522 if (!kit.stopped()) {
523 // copy backward
524
525 if (count > 0) {
526 for (int i = count-1; i >= 0; i--) {
527 copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
528 }
    } else if (can_reshape) {
530 PhaseGVN& gvn = kit.gvn();
531 assert(gvn.is_IterGVN(), "");
532 gvn.record_for_igvn(adr_src);
533 gvn.record_for_igvn(adr_dest);
534 }
535 }
536 }
537
538 bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
539 Node* ctl, Node *mem) {
540 if (can_reshape) {
541 PhaseIterGVN* igvn = phase->is_IterGVN();
542 igvn->set_delay_transform(false);
543 if (is_clonebasic()) {
544 Node* out_mem = proj_out(TypeFunc::Memory);
545
546 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
547 if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
548 out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
549 assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
550 return false;
551 }
552
553 igvn->replace_node(out_mem->raw_out(0), mem);
554
555 Node* out_ctl = proj_out(TypeFunc::Control);
556 igvn->replace_node(out_ctl, ctl);
557 } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the
      // new memory and control, and with the input I/O.
560 CallProjections* callprojs = extract_projections(true, false);
561
562 if (callprojs->fallthrough_ioproj != nullptr) {
563 igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
564 }
565 if (callprojs->fallthrough_memproj != nullptr) {
566 igvn->replace_node(callprojs->fallthrough_memproj, mem);
567 }
568 if (callprojs->fallthrough_catchproj != nullptr) {
569 igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
570 }
571
572 // The ArrayCopyNode is not disconnected. It still has the
573 // projections for the exception case. Replace current
574 // ArrayCopyNode with a dummy new one with a top() control so
575 // that this part of the graph stays consistent but is
576 // eventually removed.
577
578 set_req(0, phase->C->top());
579 remove_dead_region(phase, can_reshape);
580 }
581 } else {
582 if (in(TypeFunc::Control) != ctl) {
583 // we can't return new memory and control from Ideal at parse time
584 assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
585 phase->record_for_igvn(this);
586 return false;
587 }
588 }
589 return true;
590 }
591
592
593 Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
594 // Perform any generic optimizations first
595 Node* result = SafePointNode::Ideal(phase, can_reshape);
596 if (result != nullptr) {
597 return result;
598 }
599
600 if (StressArrayCopyMacroNode && !can_reshape) {
601 phase->record_for_igvn(this);
602 return nullptr;
603 }
604
  // See if it's a small array copy that we can inline as loads/stores.
  // Here we can only handle:
  // - an arraycopy whose arguments were all validated before and that
  //   doesn't need card marking
  // - a clone that doesn't need card marking
611
612 if (!is_clonebasic() && !is_arraycopy_validated() &&
613 !is_copyofrange_validated() && !is_copyof_validated()) {
614 return nullptr;
615 }
616
617 assert(in(TypeFunc::Control) != nullptr &&
618 in(TypeFunc::Memory) != nullptr &&
619 in(ArrayCopyNode::Src) != nullptr &&
620 in(ArrayCopyNode::Dest) != nullptr &&
621 in(ArrayCopyNode::Length) != nullptr &&
622 in(ArrayCopyNode::SrcPos) != nullptr &&
623 in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");
624
625 if (in(TypeFunc::Control)->is_top() ||
626 in(TypeFunc::Memory)->is_top() ||
627 phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
628 phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
629 (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
630 (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
631 return nullptr;
632 }
633
634 int count = get_count(phase);
635
636 if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
637 return nullptr;
638 }
639
640 Node* src = in(ArrayCopyNode::Src);
641 Node* dest = in(ArrayCopyNode::Dest);
642 const Type* src_type = phase->type(src);
643 const Type* dest_type = phase->type(dest);
644
645 if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
    // A clone used for a load of an unknown inline type can't be
    // optimized at this point
648 return nullptr;
649 }
650
651 Node* mem = try_clone_instance(phase, can_reshape, count);
652 if (mem != nullptr) {
653 return (mem == NodeSentinel) ? nullptr : mem;
654 }
655
656 Node* adr_src = nullptr;
657 Node* base_src = nullptr;
658 Node* adr_dest = nullptr;
659 Node* base_dest = nullptr;
660 BasicType copy_type = T_ILLEGAL;
661 const Type* value_type = nullptr;
662 bool disjoint_bases = false;
663
664 // EA may have moved an input to a new slice. EA stores the new address types in the ArrayCopy node itself
665 // (_src_type/_dest_type). phase->type(src) and _src_type or phase->type(dest) and _dest_type may be different
666 // when this transformation runs if igvn hasn't had a chance to propagate the new types yet. Make sure the new
667 // types are taken into account so new Load/Store nodes are created on the right slice.
668 const TypeAryPtr* atp_src = get_address_type(phase, _src_type, src);
669 const TypeAryPtr* atp_dest = get_address_type(phase, _dest_type, dest);
670 phase->set_type(src, phase->type(src)->join_speculative(atp_src));
671 phase->set_type(dest, phase->type(dest)->join_speculative(atp_dest));
672
  // Control flow is going to be created; it's easier to do that with _delay_transform set to true.
674
675 // prepare_array_copy() doesn't build control flow, but it creates AddP nodes. The src/dest type possibly gets
676 // narrowed above. If a newly created AddP node is commoned with a pre-existing one, then the type narrowing is lost.
677 // Setting _delay_transform before prepare_array_copy() guarantees this doesn't happen.
678 if (can_reshape) {
679 assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
680 phase->is_IterGVN()->set_delay_transform(true);
681 }
682
683 if (!prepare_array_copy(phase, can_reshape,
684 adr_src, base_src, adr_dest, base_dest,
685 copy_type, value_type, disjoint_bases)) {
686 assert(adr_src == nullptr, "no node can be left behind");
687 assert(adr_dest == nullptr, "no node can be left behind");
688 if (can_reshape) {
      assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
690 phase->is_IterGVN()->set_delay_transform(false);
691 }
692
693 return nullptr;
694 }
695
696 JVMState* new_jvms = nullptr;
697 SafePointNode* new_map = nullptr;
698 if (!is_clonebasic()) {
699 new_jvms = jvms()->clone_shallow(phase->C);
700 new_map = new SafePointNode(req(), new_jvms);
701 for (uint i = TypeFunc::FramePtr; i < req(); i++) {
702 new_map->init_req(i, in(i));
703 }
704 new_jvms->set_map(new_map);
705 } else {
706 new_jvms = new (phase->C) JVMState(0);
707 new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
708 new_jvms->set_map(new_map);
709 }
710 new_map->set_control(in(TypeFunc::Control));
711 new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
712 new_map->set_i_o(in(TypeFunc::I_O));
713 phase->record_for_igvn(new_map);
714
715 GraphKit kit(new_jvms, phase);
716
717 SafePointNode* backward_map = nullptr;
718 SafePointNode* forward_map = nullptr;
719 Node* backward_ctl = phase->C->top();
720
721 array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);
722
723 {
724 PreserveJVMState pjvms(&kit);
725
726 array_copy_forward(kit, can_reshape,
727 atp_src, atp_dest,
728 adr_src, base_src, adr_dest, base_dest,
729 copy_type, value_type, count);
730
731 forward_map = kit.stop();
732 }
733
734 kit.set_control(backward_ctl);
735 array_copy_backward(kit, can_reshape,
736 atp_src, atp_dest,
737 adr_src, base_src, adr_dest, base_dest,
738 copy_type, value_type, count);
739
740 backward_map = kit.stop();
741
742 if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
743 assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
744 Node* ctl = new RegionNode(3);
745 Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
746 kit.set_map(forward_map);
747 ctl->init_req(1, kit.control());
748 mem->init_req(1, kit.reset_memory());
749 kit.set_map(backward_map);
750 ctl->init_req(2, kit.control());
751 mem->init_req(2, kit.reset_memory());
752 kit.set_control(phase->transform(ctl));
753 kit.set_all_memory(phase->transform(mem));
754 } else if (!forward_map->control()->is_top()) {
755 kit.set_map(forward_map);
756 } else {
757 assert(!backward_map->control()->is_top(), "no copy?");
758 kit.set_map(backward_map);
759 }
760
761 if (can_reshape) {
762 assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
763 phase->is_IterGVN()->set_delay_transform(false);
764 }
765
766 mem = kit.map()->memory();
767 if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
768 if (!can_reshape) {
769 phase->record_for_igvn(this);
770 } else {
      // Put it on the IGVN worklist so that, if it happens to be dead, it is removed
772 phase->is_IterGVN()->_worklist.push(mem);
773 }
774 return nullptr;
775 }
776
777 return mem;
778 }
779
780 bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) const {
781 Node* dest = in(ArrayCopyNode::Dest);
782 if (dest->is_top()) {
783 return false;
784 }
785 const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
786 assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
787 assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
788 _src_type->is_known_instance(), "result of EA not recorded");
789
790 if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
791 assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
792 return t_oop->instance_id() == _dest_type->instance_id();
793 }
794
795 return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
796 }
797
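// Return true if n is an ArrayCopyNode that may modify the memory slice t_oop,
// recording it in ac.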
798 bool ArrayCopyNode::may_modify_helper(const TypeOopPtr* t_oop, Node* n, PhaseValues* phase, ArrayCopyNode*& ac) {
799 if (n != nullptr &&
800 n->is_ArrayCopy() &&
801 n->as_ArrayCopy()->may_modify(t_oop, phase)) {
802 ac = n->as_ArrayCopy();
803 return true;
804 }
805 return false;
806 }
807
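// Return true if an ArrayCopyNode reachable through the control input of the
// given MemBarNode may modify the memory slice t_oop. The matching
// ArrayCopyNode, if any, is returned in ac.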
808 bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac) {
809 if (mb->trailing_expanded_array_copy()) {
810 return true;
811 }
812
813 Node* c = mb->in(0);
814
815 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // Step over a G1 GC barrier if we're at, e.g., a clone with ReduceInitialCardMarks off
817 c = bs->step_over_gc_barrier(c);
818
819 CallNode* call = nullptr;
820 guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to.");
821 if (c->is_Region()) {
822 for (uint i = 1; i < c->req(); i++) {
823 if (c->in(i) != nullptr) {
824 Node* n = c->in(i)->in(0);
825 if (may_modify_helper(t_oop, n, phase, ac)) {
826 assert(c == mb->in(0), "only for clone");
827 return true;
828 }
829 }
830 }
831 } else if (may_modify_helper(t_oop, c->in(0), phase, ac)) {
832 #ifdef ASSERT
833 bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
834 static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
835 assert(c == mb->in(0) || (ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
836 #endif
837 return true;
838 }
839
840 return false;
841 }
842
// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array?
// If must_modify is false, return true if the copy could write
// between offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi.
849 bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
850 assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");
851
852 Node* dest = in(Dest);
853 Node* dest_pos = in(DestPos);
854 Node* len = in(Length);
855
856 const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
857 const TypeInt *len_t = phase->type(len)->isa_int();
858 const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();
859
860 if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
861 return !must_modify;
862 }
863
864 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
865 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
866
867 uint header;
868 uint elem_size;
869 if (ary_t->is_flat()) {
870 header = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT);
871 elem_size = ary_t->flat_elem_size();
872 } else {
873 header = arrayOopDesc::base_offset_in_bytes(ary_elem);
874 elem_size = type2aelembytes(ary_elem);
875 }
876
877 jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elem_size + header;
878 jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elem_size + header;
879 jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elem_size + header;
880 jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elem_size + header;
881
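  // With must_modify, the queried byte range has to be covered by the bytes
  // written for every possible (dest_pos, len); otherwise it is enough that
  // some possible write range overlaps it. As a sketch (assuming a regular
  // int[] destination with constant dest_pos == 3 and len == 4): the copy
  // writes bytes [header + 12, header + 28) of the destination.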
882 if (must_modify) {
883 if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
884 return true;
885 }
886 } else {
887 if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
888 return true;
889 }
890 }
891 return false;
892 }
893
// As an optimization, choose the optimal vector size for a bounded copy length
895 int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, jlong max_len) {
  assert(max_len > 0, "max_len = " JLONG_FORMAT, max_len);
  // We only care whether max_size_in_bytes is not larger than 32; we also want to
  // avoid multiplication overflow, so clamp max_len to [0, 64].
899 int max_size_in_bytes = MIN2<jlong>(max_len, 64) * type2aelembytes(type);
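  // Pick the smallest vector size that still covers the whole copy. For
  // example, with T_INT elements and max_len == 3, max_size_in_bytes == 12,
  // so a 16-byte vector (4 int lanes) is selected when
  // ArrayOperationPartialInlineSize > 16.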
900 if (ArrayOperationPartialInlineSize > 16 && max_size_in_bytes <= 16) {
901 return 16 / type2aelembytes(type);
902 } else if (ArrayOperationPartialInlineSize > 32 && max_size_in_bytes <= 32) {
903 return 32 / type2aelembytes(type);
904 } else {
905 return ArrayOperationPartialInlineSize / type2aelembytes(type);
906 }
907 }