 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

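  // For a basic clone the length input is a raw size (an intptr_t constant);
  // for the arraycopy intrinsics it is an element count (an int constant).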
  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway so easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) might lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
}


Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

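  // Wrap the incoming memory state in a MergeMem so the field-by-field copy
  // can update each alias slice independently.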
  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }


  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object.
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

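    // An element lives at base + header + (index << shift), so scale the
    // element offsets and add the array header below.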
    uint shift = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

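    // Temporarily give dest_offset a dummy user so it is not reclaimed as a
    // dead node while src_scale is transformed below.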
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    BasicType elem = ary_src->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) {
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into loads and stores, the address is
    // adjusted to point at the first array element instead.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}

void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
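  // If the two bases may refer to the same array and more than one element is
  // copied, test whether src_offset < dest_offset: copying forward to a higher
  // offset within the same array would overwrite elements before they are
  // read, so that case must copy backward.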
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}

Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        Node* mem,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    MergeMemNode* mm = MergeMemNode::make(mem);

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
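      // No elements to copy: queue the address computations so IGVN can
      // remove them if they are otherwise dead.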
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         Node* mem,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    MergeMemNode* mm = MergeMemNode::make(mem);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
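      // Copy elements count-1 down to 1 through the scaled addresses, then
      // element 0 directly through adr_src/adr_dest.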
      for (int i = count-1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
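      // The memory projection is expected to feed a single MergeMem that is
      // consumed by a single MemBar; any other shape means GC barriers were
      // emitted for the clone and the transform must bail out.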
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

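  // Under StressArrayCopyMacroNode, skip the parse-time transform and retry
  // during IGVN instead.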
  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, _src_type, src);
  const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
  Node* in_mem = in(TypeFunc::Memory);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = nullptr;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
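    // Merge the two memory states slice by slice, adding a Phi only for
    // slices where the forward and backward copies differ.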
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

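  // must_modify: report true only if the copy is guaranteed to cover the
  // queried range. Otherwise: report true if the copy may intersect the
  // queried range.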
  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimal vector size for a copy length known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  if (is_clonebasic()) {
    Node* src = in(ArrayCopyNode::Src);
    const Type* src_type = phase->type(src);

    if (src_type == Type::TOP) {
      return -1;
    }

    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway so easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) might lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             (UseFlatArray && ary_src->elem()->make_oopptr() != nullptr && ary_src->elem()->make_oopptr()->can_be_inline_type()) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
}


Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  phase->record_for_igvn(mem);
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }


  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object.
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || ary_src->is_flat() != ary_dest->is_flat() || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_dest->is_flat() && bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) ||
        (ary_dest->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), T_OBJECT, false, false, BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift = exact_log2(type2aelembytes(dest_elem));
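    // For a flat array the element size is the flat payload size, not the
    // size given by type2aelembytes(dest_elem).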
    if (ary_dest->is_flat()) {
      shift = ary_src->flat_log_elem_size();
    }
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

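    // Bail out if the elements may be inline types: whether the source is a
    // flat array is not known here, so the copy cannot be decomposed yet.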
    if (ary_src->elem()->make_oopptr() != nullptr &&
        ary_src->elem()->make_oopptr()->can_be_inline_type()) {
      return false;
    }

    BasicType elem = ary_src->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_src->is_flat() && bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) ||
        (ary_src->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into loads and stores, the address is
    // adjusted to point at the first array element instead.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot)->is_aryptr();
}

void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
  Node* ctl = kit.control();
  if (!disjoint_bases && count > 1) {
    PhaseGVN& gvn = kit.gvn();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
    Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    gvn.transform(iff);

    kit.set_control(gvn.transform(new IfFalseNode(iff)));
    backward_ctl = gvn.transform(new IfTrueNode(iff));
  }
}

void ArrayCopyNode::copy(GraphKit& kit,
                         const TypeAryPtr* atp_src,
                         const TypeAryPtr* atp_dest,
                         int i,
                         Node* base_src,
                         Node* base_dest,
                         Node* adr_src,
                         Node* adr_dest,
                         BasicType copy_type,
                         const Type* value_type) {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  Node* ctl = kit.control();
  if (atp_dest->is_flat()) {
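    // A flat array stores the inline-type payload in line: copy it one field
    // at a time, at each field's offset within the flat element.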
    ciInlineKlass* vk = atp_src->elem()->inline_klass();
    for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
      ciField* field = vk->nonstatic_field_at(j);
      int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
      Node* off = kit.MakeConX(off_in_vt + i * atp_src->flat_elem_size());
      ciType* ft = field->type();
      BasicType bt = type2field[ft->basic_type()];
      assert(!field->is_flat(), "flat field encountered");
      const Type* rt = Type::get_const_type(ft);
      const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
      assert(!bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), bt, false, false, BarrierSetC2::Optimization), "GC barriers required");
      Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
      Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
      Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, adr_type, rt, bt);
      store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, adr_type, v, rt, bt);
    }
  } else {
    Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
    Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
    Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
    Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, atp_src, value_type, copy_type);
    store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, atp_dest, v, value_type, copy_type);
  }
  kit.set_control(ctl);
}


void ArrayCopyNode::array_copy_forward(GraphKit& kit,
                                       bool can_reshape,
                                       const TypeAryPtr* atp_src,
                                       const TypeAryPtr* atp_dest,
                                       Node* adr_src,
                                       Node* base_src,
                                       Node* adr_dest,
                                       Node* base_dest,
                                       BasicType copy_type,
                                       const Type* value_type,
                                       int count) {
  if (!kit.stopped()) {
    // copy forward
    if (count > 0) {
      for (int i = 0; i < count; i++) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

void ArrayCopyNode::array_copy_backward(GraphKit& kit,
                                        bool can_reshape,
                                        const TypeAryPtr* atp_src,
                                        const TypeAryPtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!kit.stopped()) {
    // copy backward
    PhaseGVN& gvn = kit.gvn();

    if (count > 0) {
      for (int i = count-1; i >= 0; i--) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections* callprojs = extract_projections(true, false);

      if (callprojs->fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs->fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_memproj, mem);
      }
      if (callprojs->fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Perform any generic optimizations first
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) {
    return result;
  }

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const Type* dest_type = phase->type(dest);

  if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
    // clone used for load of unknown inline type can't be optimized at
    // this point
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

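  // Build a SafePoint with a JVMState so a GraphKit can be used to emit the
  // loads and stores below.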
  JVMState* new_jvms = nullptr;
  SafePointNode* new_map = nullptr;
  if (!is_clonebasic()) {
    new_jvms = jvms()->clone_shallow(phase->C);
    new_map = new SafePointNode(req(), new_jvms);
    for (uint i = TypeFunc::FramePtr; i < req(); i++) {
      new_map->init_req(i, in(i));
    }
    new_jvms->set_map(new_map);
  } else {
    new_jvms = new (phase->C) JVMState(0);
    new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
    new_jvms->set_map(new_map);
  }
  new_map->set_control(in(TypeFunc::Control));
  new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
  new_map->set_i_o(in(TypeFunc::I_O));
  phase->record_for_igvn(new_map);

  const TypeAryPtr* atp_src = get_address_type(phase, _src_type, src);
  const TypeAryPtr* atp_dest = get_address_type(phase, _dest_type, dest);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  GraphKit kit(new_jvms, phase);

  SafePointNode* backward_map = nullptr;
  SafePointNode* forward_map = nullptr;
  Node* backward_ctl = phase->C->top();

  array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);

  {
    PreserveJVMState pjvms(&kit);

    array_copy_forward(kit, can_reshape,
                       atp_src, atp_dest,
                       adr_src, base_src, adr_dest, base_dest,
                       copy_type, value_type, count);

    forward_map = kit.stop();
  }

  kit.set_control(backward_ctl);
  array_copy_backward(kit, can_reshape,
                      atp_src, atp_dest,
                      adr_src, base_src, adr_dest, base_dest,
                      copy_type, value_type, count);

  backward_map = kit.stop();

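  // If both copy directions were emitted, merge their control and memory
  // states with a Region and a bottom-memory Phi.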
  if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
    assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
    Node* ctl = new RegionNode(3);
    Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
    kit.set_map(forward_map);
    ctl->init_req(1, kit.control());
    mem->init_req(1, kit.reset_memory());
    kit.set_map(backward_map);
    ctl->init_req(2, kit.control());
    mem->init_req(2, kit.reset_memory());
    kit.set_control(phase->transform(ctl));
    kit.set_all_memory(phase->transform(mem));
  } else if (!forward_map->control()->is_top()) {
    kit.set_map(forward_map);
  } else {
    assert(!backward_map->control()->is_top(), "no copy?");
    kit.set_map(backward_map);
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  mem = kit.map()->memory();
  if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
    if (!can_reshape) {
      phase->record_for_igvn(this);
    } else {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = ary_t->is_flat() ? ary_t->flat_elem_size() : type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimal vector size for a copy length known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {