 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

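// ArrayCopy is a macro node: if the Ideal() transforms below do not manage to
// replace it with explicit loads/stores, macro expansion later lowers it to
// the appropriate runtime checks and arraycopy stub call.
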
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
// ...

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is simpler to
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // clone passes the length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be constant too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) can lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  // ...
}


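// A sketch of the hazard the pinning above guards against (hypothetical graph
// shapes): if the element load sits under a guard that is not its range check
// and an identical dominating test lets that guard be removed, an unpinned
// load could be scheduled above the remaining range checks and read out of
// bounds. C2_UNKNOWN_CONTROL_LOAD keeps the load control-dependent so it
// cannot float that far.
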
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }

  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  const TypeInstPtr* dest_type = phase->type(base_dest)->is_instptr();
  if (dest_type->instance_klass() != ik) {
    // ...
  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift  = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

    // Temporarily hook dest_offset so it is not reclaimed as dead while
    // src_scale is transformed.
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) {
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into load-stores, the address is adjusted to
    // point at where the array starts.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

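// In effect, the validated-arraycopy branch above computes the first element
// addresses as adr = base + header + (pos << shift). For example, with an
// int[] and a 16-byte array header (a common 64-bit layout), srcPos = 3
// gives adr_src = src + 16 + (3 << 2) = src + 28.
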
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}

void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node* bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}

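// Why the direction test above matters when src and dest may alias: copying
// three ints from offset 0 to offset 1 of the same array front-to-back would
// overwrite elements before they are read ({1,2,3,_} becomes {1,1,1,1}),
// while back-to-front yields the expected {1,1,2,3}. Hence src_offset <
// dest_offset (the IfTrue path) selects the backward copy.
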
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        Node* mem,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    MergeMemNode* mm = MergeMemNode::make(mem);

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         Node* mem,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    MergeMemNode* mm = MergeMemNode::make(mem);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
      for (int i = count - 1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the
      // new memory and control, and with the input I/O.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace the current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if it's a small array copy that we can inline as loads/stores.
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  //   need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Src) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, _src_type, src);
  const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
  Node* in_mem = in(TypeFunc::Memory);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = nullptr;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    // ...
// if must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

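// Worked instance of the interval arithmetic above, assuming a 16-byte
// header and 4-byte elements: dest_pos in [2,4] and len in [3,3] give
// dest_pos_lo/hi = 24/32 and dest_pos_plus_len_lo/hi = 36/44. Bytes [32,35]
// are written by every possible copy (must_modify holds), while bytes
// [20,25] only fall in some possible copy range, so just the conservative
// check succeeds.
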
// As an optimization, choose the optimal vector size for a copy length known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    // ...
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciFlatArrayKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }
// ...

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  if (is_clonebasic()) {
    Node* src = in(ArrayCopyNode::Src);
    const Type* src_type = phase->type(src);

    if (src_type == Type::TOP) {
      return -1;
    }

    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is simpler to
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // clone passes the length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be constant too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) can lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             (UseArrayFlattening && ary_src->elem()->make_oopptr() != nullptr && ary_src->elem()->make_oopptr()->can_be_inline_type()) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

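// Concrete instance of the rounding noted above: an int[3] payload is 12
// bytes and an int[4] payload is 16 bytes; both round up to 2 longwords, so
// a constant Length of 2 does not pin down the input array's length.
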
Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  // ...
}


Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  phase->record_for_igvn(mem);
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }

  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  const TypeInstPtr* dest_type = phase->type(base_dest)->is_instptr();
  if (dest_type->instance_klass() != ik) {
    // ...
  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    // TODO 8350865 What about atomicity?
    if (src_elem != dest_elem || ary_src->is_null_free() != ary_dest->is_null_free() || ary_src->is_flat() != ary_dest->is_flat() || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_dest->is_flat() && bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) ||
        (ary_dest->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), T_OBJECT, false, false, BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift = exact_log2(type2aelembytes(dest_elem));
    if (ary_dest->is_flat()) {
      assert(ary_src->is_flat(), "src and dest must be flat");
      shift = ary_src->flat_log_elem_size();
      src_elem = T_FLAT_ELEMENT;
      dest_elem = T_FLAT_ELEMENT;
    }

    const uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

    // Temporarily hook dest_offset so it is not reclaimed as dead while
    // src_scale is transformed.
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    if (ary_src->elem()->make_oopptr() != nullptr &&
        ary_src->elem()->make_oopptr()->can_be_inline_type()) {
      return false;
    }

    BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_src->is_flat() && bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) ||
        (ary_src->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into load-stores, the address is adjusted to
    // point at where the array starts.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot)->is_aryptr();
}

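// Roughly speaking, add_offset(Type::OffsetBot) widens the pointer type to
// "this array at some unknown offset", so the resulting slice covers
// loads/stores to any element of the source (or destination) array.
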
void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
  Node* ctl = kit.control();
  if (!disjoint_bases && count > 1) {
    PhaseGVN& gvn = kit.gvn();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
    Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    gvn.transform(iff);

    kit.set_control(gvn.transform(new IfFalseNode(iff)));
    backward_ctl = gvn.transform(new IfTrueNode(iff));
  }
}

void ArrayCopyNode::copy(GraphKit& kit,
                         const TypeAryPtr* atp_src,
                         const TypeAryPtr* atp_dest,
                         int i,
                         Node* base_src,
                         Node* base_dest,
                         Node* adr_src,
                         Node* adr_dest,
                         BasicType copy_type,
                         const Type* value_type) {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  Node* ctl = kit.control();
  if (atp_dest->is_flat()) {
    ciInlineKlass* vk = atp_src->elem()->inline_klass();
    for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
      ciField* field = vk->nonstatic_field_at(j);
      int off_in_vt = field->offset_in_bytes() - vk->payload_offset();
      Node* off = kit.MakeConX(off_in_vt + i * atp_src->flat_elem_size());
      ciType* ft = field->type();
      BasicType bt = type2field[ft->basic_type()];
      assert(!field->is_flat(), "flat field encountered");
      const Type* rt = Type::get_const_type(ft);
      const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
      assert(!bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), bt, false, false, BarrierSetC2::Optimization), "GC barriers required");
      Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
      Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
      Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, adr_type, rt, bt);
      store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, adr_type, v, rt, bt);
    }
  } else {
    Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
    Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
    Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
    Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, atp_src, value_type, copy_type);
    store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, atp_dest, v, value_type, copy_type);
  }
  kit.set_control(ctl);
}


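// For a flat (inline-type) element, the loop above expands one logical
// element copy into a load/store per flattened field. As a hypothetical
// example, a value class with fields (int x, int y) at payload offsets 0 and
// 4 copies element i with two load/store pairs, at adr + i * flat_elem_size()
// + 0 and at + 4.
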
void ArrayCopyNode::array_copy_forward(GraphKit& kit,
                                       bool can_reshape,
                                       const TypeAryPtr* atp_src,
                                       const TypeAryPtr* atp_dest,
                                       Node* adr_src,
                                       Node* base_src,
                                       Node* adr_dest,
                                       Node* base_dest,
                                       BasicType copy_type,
                                       const Type* value_type,
                                       int count) {
  if (!kit.stopped()) {
    // copy forward
    if (count > 0) {
      for (int i = 0; i < count; i++) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

void ArrayCopyNode::array_copy_backward(GraphKit& kit,
                                        bool can_reshape,
                                        const TypeAryPtr* atp_src,
                                        const TypeAryPtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!kit.stopped()) {
    // copy backward
    PhaseGVN& gvn = kit.gvn();

    if (count > 0) {
      for (int i = count - 1; i >= 0; i--) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the
      // new memory and control, and with the input I/O.
      CallProjections* callprojs = extract_projections(true, false);

      if (callprojs->fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs->fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_memproj, mem);
      }
      if (callprojs->fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace the current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Perform any generic optimizations first
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) {
    return result;
  }

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if it's a small array copy that we can inline as loads/stores.
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  //   need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Src) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const Type* dest_type = phase->type(dest);

  if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
    // A clone used for a load of an unknown inline type can't be optimized at
    // this point.
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  JVMState* new_jvms = nullptr;
  SafePointNode* new_map = nullptr;
  if (!is_clonebasic()) {
    new_jvms = jvms()->clone_shallow(phase->C);
    new_map = new SafePointNode(req(), new_jvms);
    for (uint i = TypeFunc::FramePtr; i < req(); i++) {
      new_map->init_req(i, in(i));
    }
    new_jvms->set_map(new_map);
  } else {
    new_jvms = new (phase->C) JVMState(0);
    new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
    new_jvms->set_map(new_map);
  }
  new_map->set_control(in(TypeFunc::Control));
  new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
  new_map->set_i_o(in(TypeFunc::I_O));
  phase->record_for_igvn(new_map);

  const TypeAryPtr* atp_src = get_address_type(phase, _src_type, src);
  const TypeAryPtr* atp_dest = get_address_type(phase, _dest_type, dest);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  GraphKit kit(new_jvms, phase);

  SafePointNode* backward_map = nullptr;
  SafePointNode* forward_map = nullptr;
  Node* backward_ctl = phase->C->top();

  array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);

  {
    PreserveJVMState pjvms(&kit);

    array_copy_forward(kit, can_reshape,
                       atp_src, atp_dest,
                       adr_src, base_src, adr_dest, base_dest,
                       copy_type, value_type, count);

    forward_map = kit.stop();
  }

  kit.set_control(backward_ctl);
  array_copy_backward(kit, can_reshape,
                      atp_src, atp_dest,
                      adr_src, base_src, adr_dest, base_dest,
                      copy_type, value_type, count);

  backward_map = kit.stop();

  if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
    assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
    Node* ctl = new RegionNode(3);
    Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
    kit.set_map(forward_map);
    ctl->init_req(1, kit.control());
    mem->init_req(1, kit.reset_memory());
    kit.set_map(backward_map);
    ctl->init_req(2, kit.control());
    mem->init_req(2, kit.reset_memory());
    kit.set_control(phase->transform(ctl));
    kit.set_all_memory(phase->transform(mem));
  } else if (!forward_map->control()->is_top()) {
    kit.set_map(forward_map);
  } else {
    assert(!backward_map->control()->is_top(), "no copy?");
    kit.set_map(backward_map);
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  mem = kit.map()->memory();
  if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
    if (!can_reshape) {
      phase->record_for_igvn(this);
    } else {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

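// A small worked instance of the expansion above (hypothetical): for a
// validated arraycopy of 2 ints between possibly overlapping arrays, Ideal
// emits a src_offset < dest_offset test, a 2-element forward load/store
// sequence on one branch and the mirrored backward sequence on the other,
// then joins control with a Region and memory with a bottom memory Phi.
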
bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    // ...
// if must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header;
  uint elem_size;
  if (ary_t->is_flat()) {
    header = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT);
    elem_size = ary_t->flat_elem_size();
  } else {
    header = arrayOopDesc::base_offset_in_bytes(ary_elem);
    elem_size = type2aelembytes(ary_elem);
  }

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elem_size + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elem_size + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elem_size + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elem_size + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

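// Note the flat-array case above: elem_size is the inline type's payload
// size rather than type2aelembytes(T_OBJECT). E.g. a value class flattened
// to two ints (a hypothetical layout, ignoring alignment padding) would give
// elem_size = 8, and the same interval checks apply unchanged.
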
// As an optimization, choose the optimal vector size for a copy length known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    // ...