5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/c2/barrierSetC2.hpp"
27 #include "gc/shared/c2/cardTableBarrierSetC2.hpp"
28 #include "gc/shared/gc_globals.hpp"
29 #include "opto/arraycopynode.hpp"
30 #include "opto/graphKit.hpp"
31 #include "utilities/powerOfTwo.hpp"
32
33 const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;
34
35 ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
36 : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
37 _kind(None),
38 _alloc_tightly_coupled(alloc_tightly_coupled),
39 _has_negative_length_guard(has_negative_length_guard),
40 _arguments_validated(false),
41 _src_type(TypeOopPtr::BOTTOM),
42 _dest_type(TypeOopPtr::BOTTOM) {
43 init_class_id(Class_ArrayCopy);
44 init_flags(Flag_is_macro);
45 C->add_macro_node(this);
46 }
47
48 uint ArrayCopyNode::size_of() const { return sizeof(*this); }
49
50 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
96 void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
97 st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
98 }
99 #endif
100
101 intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
102 // check that length is constant
103 Node* length = in(ArrayCopyNode::Length);
104 const Type* length_type = phase->type(length);
105
106 if (length_type == Type::TOP) {
107 return -1;
108 }
109
110 assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");
111
112 return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
113 }
114
115 int ArrayCopyNode::get_count(PhaseGVN *phase) const {
116 Node* src = in(ArrayCopyNode::Src);
117 const Type* src_type = phase->type(src);
118
119 if (is_clonebasic()) {
120 if (src_type->isa_instptr()) {
121 const TypeInstPtr* inst_src = src_type->is_instptr();
122 ciInstanceKlass* ik = inst_src->instance_klass();
123      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
124      // fields into account. They are rare anyway, so it is easier to simply
125      // skip instances with injected fields.
126 if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
127 return -1;
128 }
129 int nb_fields = ik->nof_nonstatic_fields();
130 return nb_fields;
131 } else {
132 const TypeAryPtr* ary_src = src_type->isa_aryptr();
133      assert(ary_src != nullptr, "not an array or instance?");
134 // clone passes a length as a rounded number of longs. If we're
135 // cloning an array we'll do it element by element. If the
136 // length of the input array is constant, ArrayCopyNode::Length
137 // must be too. Note that the opposite does not need to hold,
138 // because different input array lengths (e.g. int arrays with
139 // 3 or 4 elements) might lead to the same length input
140 // (e.g. 2 double-words).
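// A worked instance of the note above: 3 and 4 ints occupy 12 and 16
// payload bytes, and ceil(12/8) == ceil(16/8) == 2, so int[3] and int[4]
// both reach this node with the same rounded Length of 2 longs.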
141 assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
142 phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
143 if (ary_src->size()->is_con()) {
144 return ary_src->size()->get_con();
145 }
146 return -1;
147 }
148 }
149
150 return get_length_if_constant(phase);
151 }
152
153 Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
154 // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
155 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
156 // above runtime checks that guarantee it is within bounds.
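// A hedged illustration of the hazard: if the guard dominating this copy is
// replaced by an identical test that dominates it, an unpinned load could be
// scheduled above the remaining runtime checks (e.g. a bounds check emitted
// between the two tests) and fault or read garbage; hence the
// C2_UNKNOWN_CONTROL_LOAD pin below.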
157 DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
158 C2AccessValuePtr addr(adr, adr_type);
159 C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
160 Node* res = bs->load_at(access, type);
161 ctl = access.ctl();
175 }
176
177
178 Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
179 if (!is_clonebasic()) {
180 return nullptr;
181 }
182
183 Node* base_src = in(ArrayCopyNode::Src);
184 Node* base_dest = in(ArrayCopyNode::Dest);
185 Node* ctl = in(TypeFunc::Control);
186 Node* in_mem = in(TypeFunc::Memory);
187
188 const Type* src_type = phase->type(base_src);
189 const TypeInstPtr* inst_src = src_type->isa_instptr();
190 if (inst_src == nullptr) {
191 return nullptr;
192 }
193
194 MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
195 if (can_reshape) {
196 phase->is_IterGVN()->_worklist.push(mem);
197 }
198
199
200 ciInstanceKlass* ik = inst_src->instance_klass();
201
202 if (!inst_src->klass_is_exact()) {
203 assert(!ik->is_interface(), "inconsistent klass hierarchy");
204 if (ik->has_subklass()) {
205 // Concurrent class loading.
206 // Fail fast and return NodeSentinel to indicate that the transform failed.
207 return NodeSentinel;
208 } else {
209 phase->C->dependencies()->assert_leaf_type(ik);
210 }
211 }
212
213 const TypeInstPtr* dest_type = phase->type(base_dest)->is_instptr();
214 if (dest_type->instance_klass() != ik) {
274 Node* src_offset = in(ArrayCopyNode::SrcPos);
275 Node* dest_offset = in(ArrayCopyNode::DestPos);
276
277 if (is_arraycopy() || is_copyofrange() || is_copyof()) {
278 const Type* dest_type = phase->type(base_dest);
279 const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
280
281    // A newly allocated object is guaranteed not to overlap with the source object
282 disjoint_bases = is_alloc_tightly_coupled();
283 if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
284 ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
285 // We don't know if arguments are arrays
286 return false;
287 }
288
289 BasicType src_elem = ary_src->elem()->array_element_basic_type();
290 BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
291 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
292 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
293
294 if (src_elem != dest_elem || dest_elem == T_VOID) {
295 // We don't know if arguments are arrays of the same type
296 return false;
297 }
298
299 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
300 if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
301 // It's an object array copy but we can't emit the card marking
302 // that is needed
303 return false;
304 }
305
306 value_type = ary_src->elem();
307
308 uint shift = exact_log2(type2aelembytes(dest_elem));
309 uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);
310
311 src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
312 if (src_offset->is_top()) {
313 // Offset is out of bounds (the ArrayCopyNode will be removed)
314 return false;
315 }
316 dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
317 if (dest_offset->is_top()) {
318 // Offset is out of bounds (the ArrayCopyNode will be removed)
319 if (can_reshape) {
320 // record src_offset, so it can be deleted later (if it is dead)
321 phase->is_IterGVN()->_worklist.push(src_offset);
322 }
323 return false;
324 }
325
326 Node* hook = new Node(1);
327 hook->init_req(0, dest_offset);
328
329 Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
330
331 hook->destruct(phase);
332
333 Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));
334
335 adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
336 adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));
337
338 adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
339 adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));
340
341 copy_type = dest_elem;
342 } else {
343 assert(ary_src != nullptr, "should be a clone");
344 assert(is_clonebasic(), "should be");
345
346 disjoint_bases = true;
347
348 BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
349 if (is_reference_type(elem, true)) {
350 elem = T_OBJECT;
351 }
352
353 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
354 if (bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) {
355 return false;
356 }
357
358 adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
359 adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));
360
361    // The address is offset to an aligned address where a raw copy would start.
362    // If the clone copy is decomposed into load-stores, the address is adjusted to
363    // point at where the array starts.
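// Hypothetical numbers for the adjustment: if the raw copy would begin at
// byte offset 8 but elements start at base_offset_in_bytes(elem) == 16,
// diff == 8 and both addresses are advanced by 8 so the decomposed copy
// starts at the first element.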
364 const Type* toff = phase->type(src_offset);
365 int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
366 int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
367 assert(diff >= 0, "clone should not start after 1st array element");
368 if (diff > 0) {
369 adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
370 adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
371 }
372 copy_type = elem;
373 value_type = ary_src->elem();
374 }
375 return true;
376 }
377
378 const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
379 if (atp == TypeOopPtr::BOTTOM) {
380 atp = phase->type(n)->isa_ptr();
381 }
382 // adjust atp to be the correct array element address type
383 return atp->add_offset(Type::OffsetBot);
384 }
385
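// Decides the copy direction when the arrays may alias. A hedged example:
// copying elements [0..1] to [1..2] of the same array must run backward
// (highest index first), or element 1 would be overwritten before it is
// read; src_offset < dest_offset therefore selects backward_ctl.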
386 void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
387 Node* ctl = in(TypeFunc::Control);
388 if (!disjoint_bases && count > 1) {
389 Node* src_offset = in(ArrayCopyNode::SrcPos);
390 Node* dest_offset = in(ArrayCopyNode::DestPos);
391 assert(src_offset != nullptr && dest_offset != nullptr, "should be");
392 Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
393 Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
394 IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
395
396 phase->transform(iff);
397
398 forward_ctl = phase->transform(new IfFalseNode(iff));
399 backward_ctl = phase->transform(new IfTrueNode(iff));
400 } else {
401 forward_ctl = ctl;
402 }
403 }
404
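// Expands the forward copy into explicit load/store pairs. A sketch of the
// assumed shape for count == 2 over 4-byte elements: load/store at
// adr_src/adr_dest, then at adr_src + 4 / adr_dest + 4, all against the
// same MergeMem so each memory slice threads through the generated accesses.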
405 Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
406 bool can_reshape,
407 Node*& forward_ctl,
408 Node* mem,
409 const TypePtr* atp_src,
410 const TypePtr* atp_dest,
411 Node* adr_src,
412 Node* base_src,
413 Node* adr_dest,
414 Node* base_dest,
415 BasicType copy_type,
416 const Type* value_type,
417 int count) {
418 if (!forward_ctl->is_top()) {
419 // copy forward
420 MergeMemNode* mm = MergeMemNode::make(mem);
421
422 if (count > 0) {
423 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
424 Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
425 store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
426 for (int i = 1; i < count; i++) {
427 Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
428        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
429        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
430 v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
431 store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
432 }
433 } else if (can_reshape) {
434 PhaseIterGVN* igvn = phase->is_IterGVN();
435 igvn->_worklist.push(adr_src);
436 igvn->_worklist.push(adr_dest);
437 }
438 return mm;
439 }
440 return phase->C->top();
441 }
442
443 Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
444 bool can_reshape,
445 Node*& backward_ctl,
446 Node* mem,
447 const TypePtr* atp_src,
448 const TypePtr* atp_dest,
449 Node* adr_src,
450 Node* base_src,
451 Node* adr_dest,
452 Node* base_dest,
453 BasicType copy_type,
454 const Type* value_type,
455 int count) {
456 if (!backward_ctl->is_top()) {
457 // copy backward
458 MergeMemNode* mm = MergeMemNode::make(mem);
459
460 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
461 assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");
462
463 if (count > 0) {
464 for (int i = count-1; i >= 1; i--) {
465 Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
466        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
467        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
468 Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
469 store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
470 }
471 Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
472 store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
473 } else if (can_reshape) {
474 PhaseIterGVN* igvn = phase->is_IterGVN();
475 igvn->_worklist.push(adr_src);
476 igvn->_worklist.push(adr_dest);
477 }
478 return phase->transform(mm);
479 }
480 return phase->C->top();
481 }
482
483 bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
484 Node* ctl, Node *mem) {
485 if (can_reshape) {
486 PhaseIterGVN* igvn = phase->is_IterGVN();
487 igvn->set_delay_transform(false);
488 if (is_clonebasic()) {
489 Node* out_mem = proj_out(TypeFunc::Memory);
490
491 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
492 if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
493 out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
494 assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
495 return false;
496 }
497
498 igvn->replace_node(out_mem->raw_out(0), mem);
499
500 Node* out_ctl = proj_out(TypeFunc::Control);
501 igvn->replace_node(out_ctl, ctl);
502 } else {
503      // Replace the fallthrough projections of the ArrayCopyNode with the
504      // new memory, control, and the input I/O.
505 CallProjections callprojs;
506 extract_projections(&callprojs, true, false);
507
508 if (callprojs.fallthrough_ioproj != nullptr) {
509 igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
510 }
511 if (callprojs.fallthrough_memproj != nullptr) {
512 igvn->replace_node(callprojs.fallthrough_memproj, mem);
513 }
514 if (callprojs.fallthrough_catchproj != nullptr) {
515 igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
516 }
517
518      // The ArrayCopyNode is not disconnected. It still has the
519      // projections for the exception case. Replace the current
520      // ArrayCopyNode with a new dummy one whose control is top(), so
521      // that this part of the graph stays consistent but is
522      // eventually removed.
523
524 set_req(0, phase->C->top());
525 remove_dead_region(phase, can_reshape);
526 }
527 } else {
528 if (in(TypeFunc::Control) != ctl) {
529 // we can't return new memory and control from Ideal at parse time
530 assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
531 phase->record_for_igvn(this);
532 return false;
533 }
534 }
535 return true;
536 }
537
538
539 Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
540 if (remove_dead_region(phase, can_reshape)) return this;
541
542 if (StressArrayCopyMacroNode && !can_reshape) {
543 phase->record_for_igvn(this);
544 return nullptr;
545 }
546
547    // See if this is a small array copy that we can inline as
548    // loads/stores.
549 // Here we can only do:
550 // - arraycopy if all arguments were validated before and we don't
551 // need card marking
552 // - clone for which we don't need to do card marking
553
554 if (!is_clonebasic() && !is_arraycopy_validated() &&
555 !is_copyofrange_validated() && !is_copyof_validated()) {
556 return nullptr;
557 }
558
559 assert(in(TypeFunc::Control) != nullptr &&
560           in(TypeFunc::Memory) != nullptr &&
561           in(ArrayCopyNode::Src) != nullptr &&
562           in(ArrayCopyNode::Dest) != nullptr &&
563 in(ArrayCopyNode::Length) != nullptr &&
564 in(ArrayCopyNode::SrcPos) != nullptr &&
565 in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");
566
567 if (in(TypeFunc::Control)->is_top() ||
568 in(TypeFunc::Memory)->is_top() ||
569 phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
570 phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
571 (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
572 (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
573 return nullptr;
574 }
575
576 int count = get_count(phase);
577
578 if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
579 return nullptr;
580 }
581
582 Node* mem = try_clone_instance(phase, can_reshape, count);
583 if (mem != nullptr) {
584 return (mem == NodeSentinel) ? nullptr : mem;
585 }
586
587 Node* adr_src = nullptr;
588 Node* base_src = nullptr;
589 Node* adr_dest = nullptr;
590 Node* base_dest = nullptr;
591 BasicType copy_type = T_ILLEGAL;
592 const Type* value_type = nullptr;
593 bool disjoint_bases = false;
594
595 if (!prepare_array_copy(phase, can_reshape,
596 adr_src, base_src, adr_dest, base_dest,
597 copy_type, value_type, disjoint_bases)) {
598 assert(adr_src == nullptr, "no node can be left behind");
599 assert(adr_dest == nullptr, "no node can be left behind");
600 return nullptr;
601 }
602
603 Node* src = in(ArrayCopyNode::Src);
604 Node* dest = in(ArrayCopyNode::Dest);
605 const TypePtr* atp_src = get_address_type(phase, _src_type, src);
606 const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
607 Node* in_mem = in(TypeFunc::Memory);
608
609 if (can_reshape) {
610 assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
611 phase->is_IterGVN()->set_delay_transform(true);
612 }
613
614 Node* backward_ctl = phase->C->top();
615 Node* forward_ctl = phase->C->top();
616 array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);
617
618 Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
619 in_mem,
620 atp_src, atp_dest,
621 adr_src, base_src, adr_dest, base_dest,
622 copy_type, value_type, count);
623
624 Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
625 in_mem,
626 atp_src, atp_dest,
627 adr_src, base_src, adr_dest, base_dest,
628 copy_type, value_type, count);
629
630 Node* ctl = nullptr;
631 if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
632 ctl = new RegionNode(3);
633 ctl->init_req(1, forward_ctl);
634 ctl->init_req(2, backward_ctl);
635 ctl = phase->transform(ctl);
636 MergeMemNode* forward_mm = forward_mem->as_MergeMem();
637 MergeMemNode* backward_mm = backward_mem->as_MergeMem();
638 for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
639 if (mms.memory() != mms.memory2()) {
640 Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
641 phi->init_req(1, mms.memory());
642 phi->init_req(2, mms.memory2());
643 phi = phase->transform(phi);
644 mms.set_memory(phi);
645 }
646 }
647 mem = forward_mem;
648 } else if (!forward_ctl->is_top()) {
649 ctl = forward_ctl;
650 mem = forward_mem;
651 } else {
652 assert(!backward_ctl->is_top(), "no copy?");
653 ctl = backward_ctl;
654 mem = backward_mem;
655 }
656
657 if (can_reshape) {
658 assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
659 phase->is_IterGVN()->set_delay_transform(false);
660 }
661
662 if (!finish_transform(phase, can_reshape, ctl, mem)) {
663 if (can_reshape) {
664 // put in worklist, so that if it happens to be dead it is removed
665 phase->is_IterGVN()->_worklist.push(mem);
666 }
667 return nullptr;
668 }
669
670 return mem;
671 }
672
673 bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
674 Node* dest = in(ArrayCopyNode::Dest);
675 if (dest->is_top()) {
676 return false;
677 }
678 const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
679 assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
680 assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
681 _src_type->is_known_instance(), "result of EA not recorded");
682
683 if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
740 // if must_modify is true, return true if the copy is guaranteed to
741 // write between offset_lo and offset_hi
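// Hedged example, assuming a 16-byte int[] header: dest_pos in [2,2] and len
// in [3,3] make the copy write exactly bytes [24, 36) of the destination, so
// with must_modify any queried [offset_lo, offset_hi] inside that window
// returns true, while without it any overlapping window does.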
742 bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
743 assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");
744
745 Node* dest = in(Dest);
746 Node* dest_pos = in(DestPos);
747 Node* len = in(Length);
748
749 const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
750 const TypeInt *len_t = phase->type(len)->isa_int();
751 const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();
752
753 if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
754 return !must_modify;
755 }
756
757 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
758 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
759
760 uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
761 uint elemsize = type2aelembytes(ary_elem);
762
763 jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
764 jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
765 jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
766 jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;
767
768 if (must_modify) {
769 if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
770 return true;
771 }
772 } else {
773 if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
774 return true;
775 }
776 }
777 return false;
778 }
779
780 // As an optimization, choose the optimal vector size for bounded copy length
781 int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, jlong max_len) {
782 assert(max_len > 0, JLONG_FORMAT, max_len);
783    // We only care whether max_size_in_bytes is not larger than 32; we also want to avoid
784    // multiplication overflow, so clamp max_len to [0, 64].
785 int max_size_in_bytes = MIN2<jlong>(max_len, 64) * type2aelembytes(type);
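// After the clamp (and assuming type2aelembytes(type) <= 8), the product is
// at most 64 * 8 == 512 bytes, safely inside int range.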
786 if (ArrayOperationPartialInlineSize > 16 && max_size_in_bytes <= 16) {
|
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciFlatArrayKlass.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/c2/barrierSetC2.hpp"
28 #include "gc/shared/c2/cardTableBarrierSetC2.hpp"
29 #include "gc/shared/gc_globals.hpp"
30 #include "opto/arraycopynode.hpp"
31 #include "opto/graphKit.hpp"
32 #include "opto/inlinetypenode.hpp"
33 #include "utilities/powerOfTwo.hpp"
34
35 const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;
36
37 ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
38 : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
39 _kind(None),
40 _alloc_tightly_coupled(alloc_tightly_coupled),
41 _has_negative_length_guard(has_negative_length_guard),
42 _arguments_validated(false),
43 _src_type(TypeOopPtr::BOTTOM),
44 _dest_type(TypeOopPtr::BOTTOM) {
45 init_class_id(Class_ArrayCopy);
46 init_flags(Flag_is_macro);
47 C->add_macro_node(this);
48 }
49
50 uint ArrayCopyNode::size_of() const { return sizeof(*this); }
51
52 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
98 void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
99 st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
100 }
101 #endif
102
103 intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
104 // check that length is constant
105 Node* length = in(ArrayCopyNode::Length);
106 const Type* length_type = phase->type(length);
107
108 if (length_type == Type::TOP) {
109 return -1;
110 }
111
112 assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");
113
114 return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
115 }
116
117 int ArrayCopyNode::get_count(PhaseGVN *phase) const {
118 if (is_clonebasic()) {
119 Node* src = in(ArrayCopyNode::Src);
120 const Type* src_type = phase->type(src);
121
122 if (src_type == Type::TOP) {
123 return -1;
124 }
125
126 if (src_type->isa_instptr()) {
127 const TypeInstPtr* inst_src = src_type->is_instptr();
128 ciInstanceKlass* ik = inst_src->instance_klass();
129        // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
130        // fields into account. They are rare anyway, so it is easier to simply
131        // skip instances with injected fields.
132 if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
133 return -1;
134 }
135 int nb_fields = ik->nof_nonstatic_fields();
136 return nb_fields;
137 } else {
138 const TypeAryPtr* ary_src = src_type->isa_aryptr();
139        assert(ary_src != nullptr, "not an array or instance?");
140 // clone passes a length as a rounded number of longs. If we're
141 // cloning an array we'll do it element by element. If the
142 // length of the input array is constant, ArrayCopyNode::Length
143 // must be too. Note that the opposite does not need to hold,
144 // because different input array lengths (e.g. int arrays with
145 // 3 or 4 elements) might lead to the same length input
146 // (e.g. 2 double-words).
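// Concretely (as noted above): 3 and 4 ints occupy 12 and 16 payload bytes,
// both rounding up to the same Length input of 2 longs.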
147 assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
148 (UseArrayFlattening && ary_src->elem()->make_oopptr() != nullptr && ary_src->elem()->make_oopptr()->can_be_inline_type()) ||
149 phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
150 if (ary_src->size()->is_con()) {
151 return ary_src->size()->get_con();
152 }
153 return -1;
154 }
155 }
156
157 return get_length_if_constant(phase);
158 }
159
160 Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
161 // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
162 // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
163 // above runtime checks that guarantee it is within bounds.
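// Illustration (hedged): the condition guarding this copy is not a range
// check, so if GVN replaces it with an identical dominating test, nothing
// but the control pin added here stops the load from floating above the
// bounds checks that make the access safe.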
164 DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
165 C2AccessValuePtr addr(adr, adr_type);
166 C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
167 Node* res = bs->load_at(access, type);
168 ctl = access.ctl();
182 }
183
184
185 Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
186 if (!is_clonebasic()) {
187 return nullptr;
188 }
189
190 Node* base_src = in(ArrayCopyNode::Src);
191 Node* base_dest = in(ArrayCopyNode::Dest);
192 Node* ctl = in(TypeFunc::Control);
193 Node* in_mem = in(TypeFunc::Memory);
194
195 const Type* src_type = phase->type(base_src);
196 const TypeInstPtr* inst_src = src_type->isa_instptr();
197 if (inst_src == nullptr) {
198 return nullptr;
199 }
200
201 MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
202 phase->record_for_igvn(mem);
203 if (can_reshape) {
204 phase->is_IterGVN()->_worklist.push(mem);
205 }
206
207
208 ciInstanceKlass* ik = inst_src->instance_klass();
209
210 if (!inst_src->klass_is_exact()) {
211 assert(!ik->is_interface(), "inconsistent klass hierarchy");
212 if (ik->has_subklass()) {
213 // Concurrent class loading.
214 // Fail fast and return NodeSentinel to indicate that the transform failed.
215 return NodeSentinel;
216 } else {
217 phase->C->dependencies()->assert_leaf_type(ik);
218 }
219 }
220
221 const TypeInstPtr* dest_type = phase->type(base_dest)->is_instptr();
222 if (dest_type->instance_klass() != ik) {
282 Node* src_offset = in(ArrayCopyNode::SrcPos);
283 Node* dest_offset = in(ArrayCopyNode::DestPos);
284
285 if (is_arraycopy() || is_copyofrange() || is_copyof()) {
286 const Type* dest_type = phase->type(base_dest);
287 const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
288
289      // A newly allocated object is guaranteed not to overlap with the source object
290 disjoint_bases = is_alloc_tightly_coupled();
291 if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
292 ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
293 // We don't know if arguments are arrays
294 return false;
295 }
296
297 BasicType src_elem = ary_src->elem()->array_element_basic_type();
298 BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
299 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
300 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
301
302 // TODO 8350865 What about atomicity?
303 if (src_elem != dest_elem || ary_src->is_null_free() != ary_dest->is_null_free() || ary_src->is_flat() != ary_dest->is_flat() || dest_elem == T_VOID) {
304 // We don't know if arguments are arrays of the same type
305 return false;
306 }
307
308 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
309 if ((!ary_dest->is_flat() && bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) ||
310 (ary_dest->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
311 bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), T_OBJECT, false, false, BarrierSetC2::Optimization))) {
312 // It's an object array copy but we can't emit the card marking that is needed
313 return false;
314 }
315
316 value_type = ary_src->elem();
317
318 uint shift = exact_log2(type2aelembytes(dest_elem));
319 if (ary_dest->is_flat()) {
320 assert(ary_src->is_flat(), "src and dest must be flat");
321 shift = ary_src->flat_log_elem_size();
322 src_elem = T_FLAT_ELEMENT;
323 dest_elem = T_FLAT_ELEMENT;
324 }
325
326 const uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);
327
328 src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
329 if (src_offset->is_top()) {
330 // Offset is out of bounds (the ArrayCopyNode will be removed)
331 return false;
332 }
333 dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
334 if (dest_offset->is_top()) {
335 // Offset is out of bounds (the ArrayCopyNode will be removed)
336 if (can_reshape) {
337 // record src_offset, so it can be deleted later (if it is dead)
338 phase->is_IterGVN()->_worklist.push(src_offset);
339 }
340 return false;
341 }
342
343 Node* hook = new Node(1);
344 hook->init_req(0, dest_offset);
345
346 Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
347
348 hook->destruct(phase);
349
350 Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));
351
352 adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
353 adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));
354
355 adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
356 adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));
357
358 copy_type = dest_elem;
359 } else {
360 assert(ary_src != nullptr, "should be a clone");
361 assert(is_clonebasic(), "should be");
362
363 disjoint_bases = true;
364
365 if (ary_src->elem()->make_oopptr() != nullptr &&
366 ary_src->elem()->make_oopptr()->can_be_inline_type()) {
367 return false;
368 }
369
370 BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
371 if (is_reference_type(elem, true)) {
372 elem = T_OBJECT;
373 }
374
375 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
376 if ((!ary_src->is_flat() && bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) ||
377 (ary_src->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
378 bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization))) {
379 // It's an object array copy but we can't emit the card marking that is needed
380 return false;
381 }
382
383 adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
384 adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));
385
386      // The address is offset to an aligned address where a raw copy would start.
387      // If the clone copy is decomposed into load-stores, the address is adjusted to
388      // point at where the array starts.
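// For instance (offsets assumed for illustration): a raw start of 8 with an
// element base of 16 gives diff == 8, so both addresses below are advanced
// past the remaining header bytes.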
389 const Type* toff = phase->type(src_offset);
390 int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
391 int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
392 assert(diff >= 0, "clone should not start after 1st array element");
393 if (diff > 0) {
394 adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
395 adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
396 }
397 copy_type = elem;
398 value_type = ary_src->elem();
399 }
400 return true;
401 }
402
403 const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
404 if (atp == TypeOopPtr::BOTTOM) {
405 atp = phase->type(n)->isa_ptr();
406 }
407 // adjust atp to be the correct array element address type
408 return atp->add_offset(Type::OffsetBot)->is_aryptr();
409 }
410
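// GraphKit variant of the overlap test: when bases may alias and count > 1,
// src_offset < dest_offset routes control to backward_ctl (copy from the
// highest index down so no element is clobbered before it is read), while
// the false branch stays on kit.control() for the forward copy.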
411 void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
412 Node* ctl = kit.control();
413 if (!disjoint_bases && count > 1) {
414 PhaseGVN& gvn = kit.gvn();
415 Node* src_offset = in(ArrayCopyNode::SrcPos);
416 Node* dest_offset = in(ArrayCopyNode::DestPos);
417 assert(src_offset != nullptr && dest_offset != nullptr, "should be");
418 Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
419 Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
420 IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);
421
422 gvn.transform(iff);
423
424 kit.set_control(gvn.transform(new IfFalseNode(iff)));
425 backward_ctl = gvn.transform(new IfTrueNode(iff));
426 }
427 }
428
429 void ArrayCopyNode::copy(GraphKit& kit,
430 const TypeAryPtr* atp_src,
431 const TypeAryPtr* atp_dest,
432 int i,
433 Node* base_src,
434 Node* base_dest,
435 Node* adr_src,
436 Node* adr_dest,
437 BasicType copy_type,
438 const Type* value_type) {
439 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
440 Node* ctl = kit.control();
441 if (atp_dest->is_flat()) {
442 ciInlineKlass* vk = atp_src->elem()->inline_klass();
443 for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
444 ciField* field = vk->nonstatic_field_at(j);
445 int off_in_vt = field->offset_in_bytes() - vk->payload_offset();
446 Node* off = kit.MakeConX(off_in_vt + i * atp_src->flat_elem_size());
447 ciType* ft = field->type();
448 BasicType bt = type2field[ft->basic_type()];
449 assert(!field->is_flat(), "flat field encountered");
450 const Type* rt = Type::get_const_type(ft);
451 const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
452 assert(!bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), bt, false, false, BarrierSetC2::Optimization), "GC barriers required");
453 Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
454 Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
455 Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, adr_type, rt, bt);
456 store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, adr_type, v, rt, bt);
457 }
458 } else {
459 Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
460 Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
461 Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
462 Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, atp_src, value_type, copy_type);
463 store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, atp_dest, v, value_type, copy_type);
464 }
465 kit.set_control(ctl);
466 }
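// Hedged sketch of the flat path above: for a flat array of a value class
// with two int fields (hypothetical), each element copy expands into two
// load/store pairs, one per field, addressed at
// off_in_vt + i * flat_elem_size past the payload base.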
467
468
469 void ArrayCopyNode::array_copy_forward(GraphKit& kit,
470 bool can_reshape,
471 const TypeAryPtr* atp_src,
472 const TypeAryPtr* atp_dest,
473 Node* adr_src,
474 Node* base_src,
475 Node* adr_dest,
476 Node* base_dest,
477 BasicType copy_type,
478 const Type* value_type,
479 int count) {
480 if (!kit.stopped()) {
481 // copy forward
482 if (count > 0) {
483 for (int i = 0; i < count; i++) {
484 copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
485 }
486 } else if (can_reshape) {
487 PhaseGVN& gvn = kit.gvn();
488 assert(gvn.is_IterGVN(), "");
489 gvn.record_for_igvn(adr_src);
490 gvn.record_for_igvn(adr_dest);
491 }
492 }
493 }
494
495 void ArrayCopyNode::array_copy_backward(GraphKit& kit,
496 bool can_reshape,
497 const TypeAryPtr* atp_src,
498 const TypeAryPtr* atp_dest,
499 Node* adr_src,
500 Node* base_src,
501 Node* adr_dest,
502 Node* base_dest,
503 BasicType copy_type,
504 const Type* value_type,
505 int count) {
506 if (!kit.stopped()) {
507 // copy backward
508 PhaseGVN& gvn = kit.gvn();
509
510 if (count > 0) {
511 for (int i = count-1; i >= 0; i--) {
512 copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
513 }
514      } else if (can_reshape) {
515 PhaseGVN& gvn = kit.gvn();
516 assert(gvn.is_IterGVN(), "");
517 gvn.record_for_igvn(adr_src);
518 gvn.record_for_igvn(adr_dest);
519 }
520 }
521 }
522
523 bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
524 Node* ctl, Node *mem) {
525 if (can_reshape) {
526 PhaseIterGVN* igvn = phase->is_IterGVN();
527 igvn->set_delay_transform(false);
528 if (is_clonebasic()) {
529 Node* out_mem = proj_out(TypeFunc::Memory);
530
531 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
532 if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
533 out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
534 assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
535 return false;
536 }
537
538 igvn->replace_node(out_mem->raw_out(0), mem);
539
540 Node* out_ctl = proj_out(TypeFunc::Control);
541 igvn->replace_node(out_ctl, ctl);
542 } else {
543        // Replace the fallthrough projections of the ArrayCopyNode with the
544        // new memory, control, and the input I/O.
545 CallProjections* callprojs = extract_projections(true, false);
546
547 if (callprojs->fallthrough_ioproj != nullptr) {
548 igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
549 }
550 if (callprojs->fallthrough_memproj != nullptr) {
551 igvn->replace_node(callprojs->fallthrough_memproj, mem);
552 }
553 if (callprojs->fallthrough_catchproj != nullptr) {
554 igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
555 }
556
557        // The ArrayCopyNode is not disconnected. It still has the
558        // projections for the exception case. Replace the current
559        // ArrayCopyNode with a new dummy one whose control is top(), so
560        // that this part of the graph stays consistent but is
561        // eventually removed.
562
563 set_req(0, phase->C->top());
564 remove_dead_region(phase, can_reshape);
565 }
566 } else {
567 if (in(TypeFunc::Control) != ctl) {
568 // we can't return new memory and control from Ideal at parse time
569 assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
570 phase->record_for_igvn(this);
571 return false;
572 }
573 }
574 return true;
575 }
576
577
578 Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
579 // Perform any generic optimizations first
580 Node* result = SafePointNode::Ideal(phase, can_reshape);
581 if (result != nullptr) {
582 return result;
583 }
584
585 if (StressArrayCopyMacroNode && !can_reshape) {
586 phase->record_for_igvn(this);
587 return nullptr;
588 }
589
590      // See if this is a small array copy that we can inline as
591      // loads/stores.
592 // Here we can only do:
593 // - arraycopy if all arguments were validated before and we don't
594 // need card marking
595 // - clone for which we don't need to do card marking
596
597 if (!is_clonebasic() && !is_arraycopy_validated() &&
598 !is_copyofrange_validated() && !is_copyof_validated()) {
599 return nullptr;
600 }
601
602 assert(in(TypeFunc::Control) != nullptr &&
603             in(TypeFunc::Memory) != nullptr &&
604             in(ArrayCopyNode::Src) != nullptr &&
605             in(ArrayCopyNode::Dest) != nullptr &&
606 in(ArrayCopyNode::Length) != nullptr &&
607 in(ArrayCopyNode::SrcPos) != nullptr &&
608 in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");
609
610 if (in(TypeFunc::Control)->is_top() ||
611 in(TypeFunc::Memory)->is_top() ||
612 phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
613 phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
614 (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
615 (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
616 return nullptr;
617 }
618
619 int count = get_count(phase);
620
621 if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
622 return nullptr;
623 }
624
625 Node* src = in(ArrayCopyNode::Src);
626 Node* dest = in(ArrayCopyNode::Dest);
627 const Type* src_type = phase->type(src);
628 const Type* dest_type = phase->type(dest);
629
630 if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
631 // clone used for load of unknown inline type can't be optimized at
632 // this point
633 return nullptr;
634 }
635
636 Node* mem = try_clone_instance(phase, can_reshape, count);
637 if (mem != nullptr) {
638 return (mem == NodeSentinel) ? nullptr : mem;
639 }
640
641 Node* adr_src = nullptr;
642 Node* base_src = nullptr;
643 Node* adr_dest = nullptr;
644 Node* base_dest = nullptr;
645 BasicType copy_type = T_ILLEGAL;
646 const Type* value_type = nullptr;
647 bool disjoint_bases = false;
648
649 if (!prepare_array_copy(phase, can_reshape,
650 adr_src, base_src, adr_dest, base_dest,
651 copy_type, value_type, disjoint_bases)) {
652 assert(adr_src == nullptr, "no node can be left behind");
653 assert(adr_dest == nullptr, "no node can be left behind");
654 return nullptr;
655 }
656
657 JVMState* new_jvms = nullptr;
658 SafePointNode* new_map = nullptr;
659 if (!is_clonebasic()) {
660 new_jvms = jvms()->clone_shallow(phase->C);
661 new_map = new SafePointNode(req(), new_jvms);
662 for (uint i = TypeFunc::FramePtr; i < req(); i++) {
663 new_map->init_req(i, in(i));
664 }
665 new_jvms->set_map(new_map);
666 } else {
667 new_jvms = new (phase->C) JVMState(0);
668 new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
669 new_jvms->set_map(new_map);
670 }
671 new_map->set_control(in(TypeFunc::Control));
672 new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
673 new_map->set_i_o(in(TypeFunc::I_O));
674 phase->record_for_igvn(new_map);
675
676 const TypeAryPtr* atp_src = get_address_type(phase, _src_type, src);
677 const TypeAryPtr* atp_dest = get_address_type(phase, _dest_type, dest);
678
679 if (can_reshape) {
680 assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
681 phase->is_IterGVN()->set_delay_transform(true);
682 }
683
684 GraphKit kit(new_jvms, phase);
685
686 SafePointNode* backward_map = nullptr;
687 SafePointNode* forward_map = nullptr;
688 Node* backward_ctl = phase->C->top();
689
690 array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);
691
692 {
693 PreserveJVMState pjvms(&kit);
694
695 array_copy_forward(kit, can_reshape,
696 atp_src, atp_dest,
697 adr_src, base_src, adr_dest, base_dest,
698 copy_type, value_type, count);
699
700 forward_map = kit.stop();
701 }
702
703 kit.set_control(backward_ctl);
704 array_copy_backward(kit, can_reshape,
705 atp_src, atp_dest,
706 adr_src, base_src, adr_dest, base_dest,
707 copy_type, value_type, count);
708
709 backward_map = kit.stop();
710
711 if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
712 assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
713 Node* ctl = new RegionNode(3);
714 Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
715 kit.set_map(forward_map);
716 ctl->init_req(1, kit.control());
717 mem->init_req(1, kit.reset_memory());
718 kit.set_map(backward_map);
719 ctl->init_req(2, kit.control());
720 mem->init_req(2, kit.reset_memory());
721 kit.set_control(phase->transform(ctl));
722 kit.set_all_memory(phase->transform(mem));
723 } else if (!forward_map->control()->is_top()) {
724 kit.set_map(forward_map);
725 } else {
726 assert(!backward_map->control()->is_top(), "no copy?");
727 kit.set_map(backward_map);
728 }
729
730 if (can_reshape) {
731 assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
732 phase->is_IterGVN()->set_delay_transform(false);
733 }
734
735 mem = kit.map()->memory();
736 if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
737 if (!can_reshape) {
738 phase->record_for_igvn(this);
739 } else {
740 // put in worklist, so that if it happens to be dead it is removed
741 phase->is_IterGVN()->_worklist.push(mem);
742 }
743 return nullptr;
744 }
745
746 return mem;
747 }
748
749 bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
750 Node* dest = in(ArrayCopyNode::Dest);
751 if (dest->is_top()) {
752 return false;
753 }
754 const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
755 assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
756 assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
757 _src_type->is_known_instance(), "result of EA not recorded");
758
759 if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
816 // if must_modify is true, return true if the copy is guaranteed to
817 // write between offset_lo and offset_hi
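// Worked case (header size assumed): for a non-flat int[] with a 16-byte
// header, dest_pos == 2 and len == 3 give a written byte range of [24, 36);
// the must_modify test requires the queried range to sit inside it, the
// may-modify test only to intersect it.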
818 bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
819 assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");
820
821 Node* dest = in(Dest);
822 Node* dest_pos = in(DestPos);
823 Node* len = in(Length);
824
825 const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
826 const TypeInt *len_t = phase->type(len)->isa_int();
827 const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();
828
829 if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
830 return !must_modify;
831 }
832
833 BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
834 if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;
835
836 uint header;
837 uint elem_size;
838 if (ary_t->is_flat()) {
839 header = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT);
840 elem_size = ary_t->flat_elem_size();
841 } else {
842 header = arrayOopDesc::base_offset_in_bytes(ary_elem);
843 elem_size = type2aelembytes(ary_elem);
844 }
845
846 jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elem_size + header;
847 jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elem_size + header;
848 jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elem_size + header;
849 jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elem_size + header;
850
851 if (must_modify) {
852 if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
853 return true;
854 }
855 } else {
856 if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
857 return true;
858 }
859 }
860 return false;
861 }
862
863 // As an optimization, choose the optimal vector size for bounded copy length
864 int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, jlong max_len) {
865 assert(max_len > 0, JLONG_FORMAT, max_len);
866      // We only care whether max_size_in_bytes is not larger than 32; we also want to avoid
867      // multiplication overflow, so clamp max_len to [0, 64].
868 int max_size_in_bytes = MIN2<jlong>(max_len, 64) * type2aelembytes(type);
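// With max_len clamped to 64 and element sizes of at most 8 bytes (assumed),
// the product is bounded by 512, so the narrowing to int cannot overflow.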
869 if (ArrayOperationPartialInlineSize > 16 && max_size_in_bytes <= 16) {
|