5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "gc/shared/barrierSet.hpp"
26 #include "gc/shared/tlab_globals.hpp"
27 #include "opto/arraycopynode.hpp"
28 #include "oops/objArrayKlass.hpp"
29 #include "opto/convertnode.hpp"
30 #include "opto/vectornode.hpp"
31 #include "opto/graphKit.hpp"
32 #include "opto/macro.hpp"
33 #include "opto/runtime.hpp"
34 #include "opto/castnode.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "utilities/align.hpp"
37 #include "utilities/powerOfTwo.hpp"
38
39 void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, int alias_idx, Node* precedent) {
40 MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
41 mb->init_req(TypeFunc::Control, *ctrl);
42 mb->init_req(TypeFunc::Memory, *mem);
43 transform_later(mb);
44 *ctrl = new ProjNode(mb,TypeFunc::Control);
128 }
129
130 IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
131 transform_later(iff);
132
133 Node* if_slow = new IfTrueNode(iff);
134 transform_later(if_slow);
135
136 if (region != nullptr) {
137 region->add_req(if_slow);
138 }
139
140 Node* if_fast = new IfFalseNode(iff);
141 transform_later(if_fast);
142
143 *ctrl = if_fast;
144
145 return if_slow;
146 }
147
148 inline Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
149 return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
150 }
151
152 void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
153 if ((*ctrl)->is_top())
154 return; // already stopped
155 if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
156 return; // index is already adequately typed
157 Node* cmp_lt = new CmpINode(index, intcon(0));
158 transform_later(cmp_lt);
159 Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
160 transform_later(bol_lt);
161 generate_guard(ctrl, bol_lt, region, PROB_MIN);
162 }
163
164 void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
165 if ((*ctrl)->is_top())
166 return; // already stopped
167 bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
168 if (zero_offset && subseq_length->eqv_uncast(array_length))
169 return; // common case of whole-array copy
170 Node* last = subseq_length;
171 if (!zero_offset) { // last += offset
272
273 *ctrl = stub_block;
274 }
275
276
277 Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
278 if ((*ctrl)->is_top()) return nullptr;
279
280 if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
281 return nullptr; // index is already adequately typed
282 Node* cmp_le = new CmpINode(index, intcon(0));
283 transform_later(cmp_le);
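// If the index is known to never be negative, "nonpositive" reduces to
// "zero", so an eq test suffices; otherwise test le.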
284 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
285 Node* bol_le = new BoolNode(cmp_le, le_or_eq);
286 transform_later(bol_le);
287 Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN);
288
289 return is_notp;
290 }
291
292 void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
293 transform_later(call);
294
295 *ctrl = new ProjNode(call,TypeFunc::Control);
296 transform_later(*ctrl);
297 Node* newmem = new ProjNode(call, TypeFunc::Memory);
298 transform_later(newmem);
299
300 uint alias_idx = C->get_alias_index(adr_type);
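// If the call affects only one known alias slice, splice its memory
// projection into that slice of a MergeMem. AliasIdxBot means the call may
// touch all of memory, so the projection becomes the new base memory state.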
301 if (alias_idx != Compile::AliasIdxBot) {
302 *mem = MergeMemNode::make(*mem);
303 (*mem)->set_memory_at(alias_idx, newmem);
304 } else {
305 *mem = MergeMemNode::make(newmem);
306 }
307 transform_later(*mem);
308 }
309
310 address PhaseMacroExpand::basictype2arraycopy(BasicType t,
311 Node* src_offset,
366 // }
367 // }
368 // // adjust params for remaining work:
369 // if (slowval != -1) {
370 // n = -1^slowval; src_offset += n; dest_offset += n; length -= n
371 // }
372 // slow_region:
373 // call slow arraycopy(src, src_offset, dest, dest_offset, length)
374 // return // via slow_call_path
375 //
376 // This routine is used from several intrinsics: System.arraycopy,
377 // Object.clone (the array subcase), and Arrays.copyOf[Range].
378 //
379 Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
380 Node** ctrl, MergeMemNode* mem, Node** io,
381 const TypePtr* adr_type,
382 BasicType basic_elem_type,
383 Node* src, Node* src_offset,
384 Node* dest, Node* dest_offset,
385 Node* copy_length,
386 bool disjoint_bases,
387 bool length_never_negative,
388 RegionNode* slow_region) {
389 if (slow_region == nullptr) {
390 slow_region = new RegionNode(1);
391 transform_later(slow_region);
392 }
393
394 Node* original_dest = dest;
395 bool dest_needs_zeroing = false;
396 bool acopy_to_uninitialized = false;
397
398 // See if this is the initialization of a newly-allocated array.
399 // If so, we will take responsibility here for initializing it to zero.
400 // (Note: Because tightly_coupled_allocation performs checks on the
401 // out-edges of the dest, we need to avoid making derived pointers
402 // from it until we have checked its uses.)
403 if (ReduceBulkZeroing
404 && !(UseTLAB && ZeroTLAB) // pointless if already zeroed
405 && basic_elem_type != T_CONFLICT // avoid corner case
406 && !src->eqv_uncast(dest)
407 && alloc != nullptr
408 && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) {
409 assert(ac->is_alloc_tightly_coupled(), "sanity");
410 // acopy to uninitialized tightly coupled allocations
411 // needs zeroing outside the copy range
412 // and the acopy itself will be to uninitialized memory
413 acopy_to_uninitialized = true;
414 if (alloc->maybe_set_complete(&_igvn)) {
415 // "You break it, you buy it."
416 InitializeNode* init = alloc->initialization();
417 assert(init->is_complete(), "we just did this");
418 init->set_complete_with_arraycopy();
419 assert(dest->is_CheckCastPP(), "sanity");
420 assert(dest->in(0)->in(0) == init, "dest pinned");
421 adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory
422 // From this point on, every exit path is responsible for
423 // initializing any non-copied parts of the object to zero.
424 // Also, if this flag is set we make sure that arraycopy interacts properly
425 // with G1, eliding pre-barriers. See CR 6627983.
426 dest_needs_zeroing = true;
427 } else {
428 // dest_needs_zeroing = false;
429 }
430 } else {
431 // No zeroing elimination needed here.
432 alloc = nullptr;
433 acopy_to_uninitialized = false;
434 //original_dest = dest;
435 //dest_needs_zeroing = false;
436 }
437
438 uint alias_idx = C->get_alias_index(adr_type);
439
440 // Results are placed here:
441 enum { fast_path = 1, // normal void-returning assembly stub
442 checked_path = 2, // special assembly stub with cleanup
443 slow_call_path = 3, // something went wrong; call the VM
444 zero_path = 4, // bypass when length of copy is zero
445 bcopy_path = 5, // copy primitive array by 64-bit blocks
446 PATH_LIMIT = 6
476 checked_i_o = *io;
477 checked_mem = mem->memory_at(alias_idx);
478 checked_value = cv;
479 *ctrl = top();
480 }
481
482 Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
483 if (not_pos != nullptr) {
484 Node* local_ctrl = not_pos, *local_io = *io;
485 MergeMemNode* local_mem = MergeMemNode::make(mem);
486 transform_later(local_mem);
487
488 // (6) length must not be negative.
489 if (!length_never_negative) {
490 generate_negative_guard(&local_ctrl, copy_length, slow_region);
491 }
492
493 // copy_length is 0.
494 if (dest_needs_zeroing) {
495 assert(!local_ctrl->is_top(), "no ctrl?");
496 Node* dest_length = alloc->in(AllocateNode::ALength);
497 if (copy_length->eqv_uncast(dest_length)
498 || _igvn.find_int_con(dest_length, 1) <= 0) {
499 // There is no zeroing to do. No need for a secondary raw memory barrier.
500 } else {
501 // Clear the whole thing since there are no source elements to copy.
502 generate_clear_array(local_ctrl, local_mem,
503 adr_type, dest, basic_elem_type,
504 intcon(0), nullptr,
505 alloc->in(AllocateNode::AllocSize));
506 // Use a secondary InitializeNode as raw memory barrier.
507 // Currently it is needed only on this path since other
508 // paths have stub or runtime calls as raw memory barriers.
509 MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
510 Compile::AliasIdxRaw,
511 top());
512 transform_later(mb);
513 mb->set_req(TypeFunc::Control,local_ctrl);
514 mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
515 local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
516 local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));
517
518 InitializeNode* init = mb->as_Initialize();
519 init->set_complete(&_igvn); // (there is no corresponding AllocateNode)
520 }
521 }
522
523 // Present the results of the fast call.
524 result_region->init_req(zero_path, local_ctrl);
525 result_i_o ->init_req(zero_path, local_io);
526 result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
527 }
528
529 if (!(*ctrl)->is_top() && dest_needs_zeroing) {
530 // We have to initialize the *uncopied* part of the array to zero.
531 // The copy destination is the slice dest[off..off+len]. The other slices
532 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
533 Node* dest_size = alloc->in(AllocateNode::AllocSize);
534 Node* dest_length = alloc->in(AllocateNode::ALength);
535 Node* dest_tail = transform_later( new AddINode(dest_offset, copy_length));
536
537 // If there is a head section that needs zeroing, do it now.
538 if (_igvn.find_int_con(dest_offset, -1) != 0) {
539 generate_clear_array(*ctrl, mem,
540 adr_type, dest, basic_elem_type,
541 intcon(0), dest_offset,
542 nullptr);
543 }
544
545 // Next, perform a dynamic check on the tail length.
546 // It is often zero, and we can win big if we prove this.
547 // There are two wins: Avoid generating the ClearArray
548 // with its attendant messy index arithmetic, and upgrade
549 // the copy to a more hardware-friendly word size of 64 bits.
550 Node* tail_ctl = nullptr;
551 if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
552 Node* cmp_lt = transform_later( new CmpINode(dest_tail, dest_length) );
553 Node* bol_lt = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
554 tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr);
555 assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome");
556 }
557
558 // At this point, let's assume there is no tail.
559 if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) {
560 // There is no tail. Try an upgrade to a 64-bit copy.
569 src, src_offset, dest, dest_offset,
570 dest_size, acopy_to_uninitialized);
571 if (didit) {
572 // Present the results of the block-copying fast call.
573 result_region->init_req(bcopy_path, local_ctrl);
574 result_i_o ->init_req(bcopy_path, local_io);
575 result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
576 }
577 }
578 if (didit) {
579 *ctrl = top(); // no regular fast path
580 }
581 }
582
583 // Clear the tail, if any.
584 if (tail_ctl != nullptr) {
585 Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl;
586 *ctrl = tail_ctl;
587 if (notail_ctl == nullptr) {
588 generate_clear_array(*ctrl, mem,
589 adr_type, dest, basic_elem_type,
590 dest_tail, nullptr,
591 dest_size);
592 } else {
593 // Make a local merge.
594 Node* done_ctl = transform_later(new RegionNode(3));
595 Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
596 done_ctl->init_req(1, notail_ctl);
597 done_mem->init_req(1, mem->memory_at(alias_idx));
598 generate_clear_array(*ctrl, mem,
599 adr_type, dest, basic_elem_type,
600 dest_tail, nullptr,
601 dest_size);
602 done_ctl->init_req(2, *ctrl);
603 done_mem->init_req(2, mem->memory_at(alias_idx));
604 *ctrl = done_ctl;
605 mem->set_memory_at(alias_idx, done_mem);
606 }
607 }
608 }
609
610 BasicType copy_type = basic_elem_type;
611 assert(basic_elem_type != T_ARRAY, "caller must fix this");
612 if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
613 // If src and dest have compatible element types, we can copy bits.
614 // Types S[] and D[] are compatible if D is a supertype of S.
615 //
616 // If they are not, we will use checked_oop_disjoint_arraycopy,
617 // which performs a fast optimistic per-oop check, and backs off
618 // further to JVM_ArrayCopy on the first per-oop check that fails.
619 // (Actually, we don't move raw bits only; the GC requires card marks.)
756 Node* length_minus = new SubINode(copy_length, slow_offset);
757 transform_later(length_minus);
758
759 // Tweak the node variables to adjust the code produced below:
760 src_offset = src_off_plus;
761 dest_offset = dest_off_plus;
762 copy_length = length_minus;
763 }
764 }
765 *ctrl = slow_control;
766 if (!(*ctrl)->is_top()) {
767 Node* local_ctrl = *ctrl, *local_io = slow_i_o;
768 MergeMemNode* local_mem = MergeMemNode::make(mem);
769 transform_later(local_mem);
770
771 // Generate the slow path, if needed.
772 local_mem->set_memory_at(alias_idx, slow_mem);
773
774 if (dest_needs_zeroing) {
775 generate_clear_array(local_ctrl, local_mem,
776 adr_type, dest, basic_elem_type,
777 intcon(0), nullptr,
778 alloc->in(AllocateNode::AllocSize));
779 }
780
781 local_mem = generate_slow_arraycopy(ac,
782 &local_ctrl, local_mem, &local_io,
783 adr_type,
784 src, src_offset, dest, dest_offset,
785 copy_length, /*dest_uninitialized*/false);
786
787 result_region->init_req(slow_call_path, local_ctrl);
788 result_i_o ->init_req(slow_call_path, local_io);
789 result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
790 } else {
791 ShouldNotReachHere(); // no call to generate_slow_arraycopy:
792 // projections were not extracted
793 }
794
795 // Remove unused edges.
796 for (uint i = 1; i < result_region->req(); i++) {
825 // a subsequent store that would make this object accessible by
826 // other threads.
827 assert(ac->_dest_type == TypeOopPtr::BOTTOM, "non-escaping destination shouldn't have a narrow slice");
828 insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore, Compile::AliasIdxBot);
829 } else {
830 int mem_bar_alias_idx = Compile::AliasIdxBot;
831 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
832 // The graph was transformed under the assumption the ArrayCopy node only had an effect on a narrow slice. We can't
833 // insert a wide membar now that it's being expanded: a load that uses the input memory state of the ArrayCopy
834 // could then become anti-dependent on the membar when it was not anti-dependent on the ArrayCopy, leading to a
835 // broken graph.
836 mem_bar_alias_idx = C->get_alias_index(ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr());
837 }
838 insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder, mem_bar_alias_idx);
839 }
840
841 assert((*ctrl)->is_Proj(), "MemBar control projection");
842 assert((*ctrl)->in(0)->isa_MemBar(), "MemBar node");
843 (*ctrl)->in(0)->isa_MemBar()->set_trailing_expanded_array_copy();
844
845 _igvn.replace_node(_callprojs.fallthrough_memproj, out_mem);
846 if (_callprojs.fallthrough_ioproj != nullptr) {
847 _igvn.replace_node(_callprojs.fallthrough_ioproj, *io);
848 }
849 _igvn.replace_node(_callprojs.fallthrough_catchproj, *ctrl);
850
851 #ifdef ASSERT
852 const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
853 if (dest_t->is_known_instance()) {
854 ArrayCopyNode* ac = nullptr;
855 assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
856 assert(ac == nullptr, "no arraycopy anymore");
857 }
858 #endif
859
860 return out_mem;
861 }
862
863 // Helper for initialization of arrays, creating a ClearArray.
864 // It writes zero bits in [start..end), within the body of an array object.
865 // The memory effects are all chained onto the 'adr_type' alias category.
866 //
867 // Since the object is otherwise uninitialized, we are free
868 // to put a little "slop" around the edges of the cleared area,
869 // as long as it does not go back into the array's header,
870 // or beyond the array end within the heap.
871 //
872 // The lower edge can be rounded down to the nearest jint and the
873 // upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
874 //
875 // Arguments:
876 // adr_type memory slice where writes are generated
877 // dest oop of the destination array
878 // basic_elem_type element type of the destination
879 // slice_idx array index of first element to store
880 // slice_len number of elements to store (or null)
881 // dest_size total size in bytes of the array object
882 //
883 // Exactly one of slice_len or dest_size must be non-null.
884 // If dest_size is non-null, zeroing extends to the end of the object.
885 // If slice_len is non-null, the slice_idx value must be a constant.
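// For example, with a 16-byte array header (assuming abase == 16) and a
// short[] (scale == 1), clearing elements [3,7) rounds the start down from
// byte offset 22 to 20 and the end up from 30 to 32; the extra bytes lie in
// the still-uninitialized object, so the slop is harmless.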
886 void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
887 const TypePtr* adr_type,
888 Node* dest,
889 BasicType basic_elem_type,
890 Node* slice_idx,
891 Node* slice_len,
892 Node* dest_size) {
893 // one or the other but not both of slice_len and dest_size:
894 assert((slice_len != nullptr? 1: 0) + (dest_size != nullptr? 1: 0) == 1, "");
895 if (slice_len == nullptr) slice_len = top();
896 if (dest_size == nullptr) dest_size = top();
897
898 uint alias_idx = C->get_alias_index(adr_type);
899
900 // operate on this memory slice:
901 Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on
902
903 // scaling and rounding of indexes:
904 int scale = exact_log2(type2aelembytes(basic_elem_type));
905 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
906 int clear_low = (-1 << scale) & (BytesPerInt - 1);
907 int bump_bit = (-1 << scale) & BytesPerInt;
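// clear_low: mask for rounding the start down to a jint boundary (nonzero
// only for sub-jint elements). bump_bit: set to BytesPerInt for elements no
// larger than a jint, indicating that a non-constant start may need an extra
// jint store just below the next jlong boundary (see the non-constant case
// below).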
908
909 // determine constant starts and ends
910 const intptr_t BIG_NEG = -128;
911 assert(BIG_NEG + 2*abase < 0, "neg enough");
912 intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
913 intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
914 if (slice_len_con == 0) {
915 return; // nothing to do here
916 }
917 intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
918 intptr_t end_con = _igvn.find_intptr_t_con(dest_size, -1);
919 if (slice_idx_con >= 0 && slice_len_con >= 0) {
920 assert(end_con < 0, "not two cons");
921 end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
922 BytesPerLong);
923 }
924
925 if (start_con >= 0 && end_con >= 0) {
926 // Constant start and end. Simple.
927 mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
928 start_con, end_con, &_igvn);
929 } else if (start_con >= 0 && dest_size != top()) {
930 // Constant start, pre-rounded end after the tail of the array.
931 Node* end = dest_size;
932 mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
933 start_con, end, &_igvn);
934 } else if (start_con >= 0 && slice_len != top()) {
935 // Constant start, non-constant end. End needs rounding up.
936 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
937 intptr_t end_base = abase + (slice_idx_con << scale);
938 int end_round = (-1 << scale) & (BytesPerLong - 1);
939 Node* end = ConvI2X(slice_len);
940 if (scale != 0)
941 end = transform_later(new LShiftXNode(end, intcon(scale) ));
942 end_base += end_round;
943 end = transform_later(new AddXNode(end, MakeConX(end_base)) );
944 end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
945 mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
946 start_con, end, &_igvn);
947 } else if (start_con < 0 && dest_size != top()) {
948 // Non-constant start, pre-rounded end after the tail of the array.
949 // This is almost certainly a "round-to-end" operation.
950 Node* start = slice_idx;
951 start = ConvI2X(start);
952 if (scale != 0)
953 start = transform_later(new LShiftXNode( start, intcon(scale) ));
954 start = transform_later(new AddXNode(start, MakeConX(abase)) );
955 if ((bump_bit | clear_low) != 0) {
956 int to_clear = (bump_bit | clear_low);
957 // Align up mod 8, then store a jint zero unconditionally
958 // just before the mod-8 boundary.
959 if (((abase + bump_bit) & ~to_clear) - bump_bit
960 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
961 bump_bit = 0;
962 assert((abase & to_clear) == 0, "array base must be long-aligned");
963 } else {
964 // Bump 'start' up to (or past) the next jint boundary:
965 start = transform_later( new AddXNode(start, MakeConX(bump_bit)) );
966 assert((abase & clear_low) == 0, "array base must be int-aligned");
967 }
968 // Round bumped 'start' down to jlong boundary in body of array.
969 start = transform_later(new AndXNode(start, MakeConX(~to_clear)) );
970 if (bump_bit != 0) {
971 // Store a zero to the immediately preceding jint:
972 Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) );
973 Node* p1 = basic_plus_adr(dest, x1);
974 mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
975 mem = transform_later(mem);
976 }
977 }
978 Node* end = dest_size; // pre-rounded
979 mem = ClearArrayNode::clear_memory(ctrl, mem, dest,
980 start, end, &_igvn);
981 } else {
982 // Non-constant start, unrounded non-constant end.
983 // (Nobody zeroes a random midsection of an array using this routine.)
984 ShouldNotReachHere(); // fix caller
985 }
986
987 // Done.
988 merge_mem->set_memory_at(alias_idx, mem);
989 }
990
991 bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
992 const TypePtr* adr_type,
993 BasicType basic_elem_type,
994 AllocateNode* alloc,
995 Node* src, Node* src_offset,
996 Node* dest, Node* dest_offset,
997 Node* dest_size, bool dest_uninitialized) {
998 // See if there is an advantage from block transfer.
999 int scale = exact_log2(type2aelembytes(basic_elem_type));
1075 const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
1076 CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
1077 "slow_arraycopy", TypePtr::BOTTOM);
1078
1079 call->init_req(TypeFunc::Control, *ctrl);
1080 call->init_req(TypeFunc::I_O , *io);
1081 call->init_req(TypeFunc::Memory , mem);
1082 call->init_req(TypeFunc::ReturnAdr, top());
1083 call->init_req(TypeFunc::FramePtr, top());
1084 call->init_req(TypeFunc::Parms+0, src);
1085 call->init_req(TypeFunc::Parms+1, src_offset);
1086 call->init_req(TypeFunc::Parms+2, dest);
1087 call->init_req(TypeFunc::Parms+3, dest_offset);
1088 call->init_req(TypeFunc::Parms+4, copy_length);
1089 call->copy_call_debug_info(&_igvn, ac);
1090
1091 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
1092 _igvn.replace_node(ac, call);
1093 transform_later(call);
1094
1095 call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
1096 *ctrl = _callprojs.fallthrough_catchproj->clone();
1097 transform_later(*ctrl);
1098
1099 Node* m = _callprojs.fallthrough_memproj->clone();
1100 transform_later(m);
1101
1102 uint alias_idx = C->get_alias_index(adr_type);
1103 MergeMemNode* out_mem;
1104 if (alias_idx != Compile::AliasIdxBot) {
1105 out_mem = MergeMemNode::make(mem);
1106 out_mem->set_memory_at(alias_idx, m);
1107 } else {
1108 out_mem = MergeMemNode::make(m);
1109 }
1110 transform_later(out_mem);
1111
1112 // When src is negative and the arraycopy is before an infinite loop, _callprojs.fallthrough_ioproj
1113 // could be null. In that case, skip the clone and leave *io as the null fallthrough_ioproj.
1114 if (_callprojs.fallthrough_ioproj != nullptr) {
1115 *io = _callprojs.fallthrough_ioproj->clone();
1116 transform_later(*io);
1117 } else {
1118 *io = nullptr;
1119 }
1120
1121 return out_mem;
1122 }
1123
1124 // Helper function; generates code for cases requiring runtime checks.
1125 Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
1126 const TypePtr* adr_type,
1127 Node* dest_elem_klass,
1128 Node* src, Node* src_offset,
1129 Node* dest, Node* dest_offset,
1130 Node* copy_length, bool dest_uninitialized) {
1131 if ((*ctrl)->is_top()) return nullptr;
1132
1133 address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
1134 if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
1135 return nullptr;
1227
1228 // Connecting remaining edges for exit_block coming from stub_block.
1229 if (exit_block) {
1230 exit_block->init_req(2, *ctrl);
1231
1232 // Memory edge corresponding to stub_region.
1233 result_memory->init_req(2, *mem);
1234
1235 uint alias_idx = C->get_alias_index(adr_type);
1236 if (alias_idx != Compile::AliasIdxBot) {
1237 *mem = MergeMemNode::make(*mem);
1238 (*mem)->set_memory_at(alias_idx, result_memory);
1239 } else {
1240 *mem = MergeMemNode::make(result_memory);
1241 }
1242 transform_later(*mem);
1243 *ctrl = exit_block;
1244 }
1245 }
1246
1247 #undef XTOP
1248
1249 void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
1250 Node* ctrl = ac->in(TypeFunc::Control);
1251 Node* io = ac->in(TypeFunc::I_O);
1252 Node* src = ac->in(ArrayCopyNode::Src);
1253 Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
1254 Node* dest = ac->in(ArrayCopyNode::Dest);
1255 Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
1256 Node* length = ac->in(ArrayCopyNode::Length);
1257 MergeMemNode* merge_mem = nullptr;
1258
1259 if (ac->is_clonebasic()) {
1260 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1261 bs->clone_at_expansion(this, ac);
1262 return;
1263 } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_clone_oop_array()) {
1264 Node* mem = ac->in(TypeFunc::Memory);
1265 merge_mem = MergeMemNode::make(mem);
1266 transform_later(merge_mem);
1267
1268 AllocateArrayNode* alloc = nullptr;
1269 if (ac->is_alloc_tightly_coupled()) {
1270 alloc = AllocateArrayNode::Ideal_array_allocation(dest);
1271 assert(alloc != nullptr, "expect alloc");
1272 }
1273
1274 const TypePtr* adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot);
1275 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1276 adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
1277 }
1278 generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
1279 adr_type, T_OBJECT,
1280 src, src_offset, dest, dest_offset, length,
1281 true, ac->has_negative_length_guard());
1282
1283 return;
1284 }
1285
1286 AllocateArrayNode* alloc = nullptr;
1287 if (ac->is_alloc_tightly_coupled()) {
1288 alloc = AllocateArrayNode::Ideal_array_allocation(dest);
1289 assert(alloc != nullptr, "expect alloc");
1290 }
1291
1292 assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");
1293
1294 // Compile time checks. If any of these checks cannot be verified at compile time,
1295 // we do not make a fast path for this call. Instead, we let the call remain as it
1296 // is. The checks we choose to mandate at compile time are:
1297 //
1298 // (1) src and dest are arrays.
1299 const Type* src_type = src->Value(&_igvn);
1300 const Type* dest_type = dest->Value(&_igvn);
1301 const TypeAryPtr* top_src = src_type->isa_aryptr();
1302 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
1303
1304 BasicType src_elem = T_CONFLICT;
1305 BasicType dest_elem = T_CONFLICT;
1306
1307 if (top_src != nullptr && top_src->elem() != Type::BOTTOM) {
1308 src_elem = top_src->elem()->array_element_basic_type();
1309 }
1310 if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
1311 dest_elem = top_dest->elem()->array_element_basic_type();
1312 }
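// Collapse all reference element types (including arrays and narrow oops)
// to T_OBJECT so that reference arrays uniformly take the oop-copying paths.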
1313 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
1314 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
1315
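// A validated arraycopy has already passed its type checks, so a known
// destination element type can stand in for a statically unknown source type.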
1316 if (ac->is_arraycopy_validated() &&
1317 dest_elem != T_CONFLICT &&
1318 src_elem == T_CONFLICT) {
1319 src_elem = dest_elem;
1320 }
1321
1322 if (src_elem == T_CONFLICT || dest_elem == T_CONFLICT) {
1323 // Conservatively insert a memory barrier on all memory slices.
1324 // Do not let writes into the source float below the arraycopy.
1325 {
1326 Node* mem = ac->in(TypeFunc::Memory);
1327 insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1328
1329 merge_mem = MergeMemNode::make(mem);
1330 transform_later(merge_mem);
1331 }
1332
1333 // Call StubRoutines::generic_arraycopy stub.
1334 Node* mem = generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io,
1335 TypeRawPtr::BOTTOM, T_CONFLICT,
1336 src, src_offset, dest, dest_offset, length,
1337 // If a negative length guard was generated for the ArrayCopyNode,
1338 // the length of the array can never be negative.
1339 false, ac->has_negative_length_guard());
1340 return;
1341 }
1342
1343 assert(!ac->is_arraycopy_validated() || (src_elem == dest_elem && dest_elem != T_VOID), "validated but different basic types");
1344
1345 // (2) src and dest arrays must have elements of the same BasicType
1346 // Figure out the size and type of the elements we will be copying.
1347 if (src_elem != dest_elem || dest_elem == T_VOID) {
1348 // The component types are not the same or are not recognized. Punt.
1349 // (But, avoid the native method wrapper to JVM_ArrayCopy.)
1350 {
1351 Node* mem = ac->in(TypeFunc::Memory);
1352 merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
1353 }
1354
1355 _igvn.replace_node(_callprojs.fallthrough_memproj, merge_mem);
1356 if (_callprojs.fallthrough_ioproj != nullptr) {
1357 _igvn.replace_node(_callprojs.fallthrough_ioproj, io);
1358 }
1359 _igvn.replace_node(_callprojs.fallthrough_catchproj, ctrl);
1360 return;
1361 }
1362
1363 //---------------------------------------------------------------------------
1364 // We will make a fast path for this call to arraycopy.
1365
1366 // We have the following tests left to perform:
1367 //
1368 // (3) src and dest must not be null.
1369 // (4) src_offset must not be negative.
1370 // (5) dest_offset must not be negative.
1371 // (6) length must not be negative.
1372 // (7) src_offset + length must not exceed length of src.
1373 // (8) dest_offset + length must not exceed length of dest.
1374 // (9) each element of an oop array must be assignable
1375
1376 {
1377 Node* mem = ac->in(TypeFunc::Memory);
1378 merge_mem = MergeMemNode::make(mem);
1379 transform_later(merge_mem);
1380 }
1381
1382 RegionNode* slow_region = new RegionNode(1);
1383 transform_later(slow_region);
1384
1385 if (!ac->is_arraycopy_validated()) {
1386 // (3) operands must not be null
1387 // We currently perform our null checks with the null_check routine.
1388 // This means that the null exceptions will be reported in the caller
1389 // rather than (correctly) reported inside the native arraycopy call.
1390 // This should be corrected, given time. We do our null check with the
1391 // stack pointer restored.
1392 // Null checks are done in library_call.cpp.
1393
1394 // (4) src_offset must not be negative.
1395 generate_negative_guard(&ctrl, src_offset, slow_region);
1396
1397 // (5) dest_offset must not be negative.
1398 generate_negative_guard(&ctrl, dest_offset, slow_region);
1399
1400 // (6) length must not be negative (moved to generate_arraycopy()).
1401 // generate_negative_guard(length, slow_region);
1402
1403 // (7) src_offset + length must not exceed length of src.
1404 Node* alen = ac->in(ArrayCopyNode::SrcLen);
1405 assert(alen != nullptr, "need src len");
1406 generate_limit_guard(&ctrl,
1407 src_offset, length,
1408 alen,
1409 slow_region);
1410
1411 // (8) dest_offset + length must not exceed length of dest.
1412 alen = ac->in(ArrayCopyNode::DestLen);
1413 assert(alen != nullptr, "need dest len");
1414 generate_limit_guard(&ctrl,
1415 dest_offset, length,
1416 alen,
1417 slow_region);
1418
1419 // (9) each element of an oop array must be assignable
1420 // The generate_arraycopy subroutine checks this.
1421 }
1422 // This is where the memory effects are placed:
1423 const TypePtr* adr_type = nullptr;
1424 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1425 adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
1426 } else {
1427 adr_type = TypeAryPtr::get_array_body_type(dest_elem);
1428 }
1429
1430 generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
1431 adr_type, dest_elem,
1432 src, src_offset, dest, dest_offset, length,
1433 // If a negative length guard was generated for the ArrayCopyNode,
1434 // the length of the array can never be negative.
1435 false, ac->has_negative_length_guard(), slow_region);
1436 }
// ============================================================================
// Second version of the same file, extended with init-value (flat array)
// support.
// ============================================================================
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciFlatArrayKlass.hpp"
26 #include "gc/shared/barrierSet.hpp"
27 #include "gc/shared/tlab_globals.hpp"
28 #include "opto/arraycopynode.hpp"
29 #include "oops/objArrayKlass.hpp"
30 #include "opto/convertnode.hpp"
31 #include "opto/vectornode.hpp"
32 #include "opto/graphKit.hpp"
33 #include "opto/macro.hpp"
34 #include "opto/runtime.hpp"
35 #include "opto/castnode.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "utilities/align.hpp"
38 #include "utilities/powerOfTwo.hpp"
39
40 void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, int alias_idx, Node* precedent) {
41 MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
42 mb->init_req(TypeFunc::Control, *ctrl);
43 mb->init_req(TypeFunc::Memory, *mem);
44 transform_later(mb);
45 *ctrl = new ProjNode(mb,TypeFunc::Control);
129 }
130
131 IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
132 transform_later(iff);
133
134 Node* if_slow = new IfTrueNode(iff);
135 transform_later(if_slow);
136
137 if (region != nullptr) {
138 region->add_req(if_slow);
139 }
140
141 Node* if_fast = new IfFalseNode(iff);
142 transform_later(if_fast);
143
144 *ctrl = if_fast;
145
146 return if_slow;
147 }
148
149 Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
150 return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
151 }
152
153 inline Node* PhaseMacroExpand::generate_fair_guard(Node** ctrl, Node* test, RegionNode* region) {
154 return generate_guard(ctrl, test, region, PROB_FAIR);
155 }
156
157 void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
158 if ((*ctrl)->is_top())
159 return; // already stopped
160 if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
161 return; // index is already adequately typed
162 Node* cmp_lt = new CmpINode(index, intcon(0));
163 transform_later(cmp_lt);
164 Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
165 transform_later(bol_lt);
166 generate_guard(ctrl, bol_lt, region, PROB_MIN);
167 }
168
169 void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
170 if ((*ctrl)->is_top())
171 return; // already stopped
172 bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
173 if (zero_offset && subseq_length->eqv_uncast(array_length))
174 return; // common case of whole-array copy
175 Node* last = subseq_length;
176 if (!zero_offset) { // last += offset
277
278 *ctrl = stub_block;
279 }
280
281
282 Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
283 if ((*ctrl)->is_top()) return nullptr;
284
285 if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
286 return nullptr; // index is already adequately typed
287 Node* cmp_le = new CmpINode(index, intcon(0));
288 transform_later(cmp_le);
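// If the index is known to never be negative, "nonpositive" reduces to
// "zero", so an eq test suffices; otherwise test le.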
289 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
290 Node* bol_le = new BoolNode(cmp_le, le_or_eq);
291 transform_later(bol_le);
292 Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN);
293
294 return is_notp;
295 }
296
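// Tests whether the bits of mask_val are set in obj's mark word. If the
// object is locked, the mark word does not hold the original bits, so the
// prototype mark word is loaded from the object's klass instead.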
297 Node* PhaseMacroExpand::mark_word_test(Node** ctrl, Node* obj, MergeMemNode* mem, uintptr_t mask_val, RegionNode* region) {
298 // Load markword and check if obj is locked
299 Node* mark = make_load(nullptr, mem->memory_at(Compile::AliasIdxRaw), obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
300 Node* locked_bit = MakeConX(markWord::unlocked_value);
301 locked_bit = transform_later(new AndXNode(locked_bit, mark));
302 Node* cmp = transform_later(new CmpXNode(locked_bit, MakeConX(0)));
303 Node* is_unlocked = transform_later(new BoolNode(cmp, BoolTest::ne));
304 IfNode* iff = transform_later(new IfNode(*ctrl, is_unlocked, PROB_MAX, COUNT_UNKNOWN))->as_If();
305 Node* locked_region = transform_later(new RegionNode(3));
306 Node* mark_phi = transform_later(new PhiNode(locked_region, TypeX_X));
307
308 // Unlocked: Use bits from mark word
309 locked_region->init_req(1, transform_later(new IfTrueNode(iff)));
310 mark_phi->init_req(1, mark);
311
312 // Locked: Load prototype header from klass
313 *ctrl = transform_later(new IfFalseNode(iff));
314 // Make loads control dependent to make sure they are only executed if array is locked
315 Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
316 Node* klass = transform_later(LoadKlassNode::make(_igvn, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
317 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
318 Node* proto = transform_later(LoadNode::make(_igvn, *ctrl, C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
319
320 locked_region->init_req(2, *ctrl);
321 mark_phi->init_req(2, proto);
322 *ctrl = locked_region;
323
324 // Now check if mark word bits are set
325 Node* mask = MakeConX(mask_val);
326 Node* masked = transform_later(new AndXNode(mark_phi, mask));
327 cmp = transform_later(new CmpXNode(masked, mask));
328 Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
329 return generate_fair_guard(ctrl, bol, region);
330 }
331
332 Node* PhaseMacroExpand::generate_flat_array_guard(Node** ctrl, Node* array, MergeMemNode* mem, RegionNode* region) {
333 return mark_word_test(ctrl, array, mem, markWord::flat_array_bit_in_place, region);
334 }
335
336 Node* PhaseMacroExpand::generate_null_free_array_guard(Node** ctrl, Node* array, MergeMemNode* mem, RegionNode* region) {
337 return mark_word_test(ctrl, array, mem, markWord::null_free_array_bit_in_place, region);
338 }
339
340 void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
341 transform_later(call);
342
343 *ctrl = new ProjNode(call,TypeFunc::Control);
344 transform_later(*ctrl);
345 Node* newmem = new ProjNode(call, TypeFunc::Memory);
346 transform_later(newmem);
347
348 uint alias_idx = C->get_alias_index(adr_type);
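// If the call affects only one known alias slice, splice its memory
// projection into that slice of a MergeMem. AliasIdxBot means the call may
// touch all of memory, so the projection becomes the new base memory state.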
349 if (alias_idx != Compile::AliasIdxBot) {
350 *mem = MergeMemNode::make(*mem);
351 (*mem)->set_memory_at(alias_idx, newmem);
352 } else {
353 *mem = MergeMemNode::make(newmem);
354 }
355 transform_later(*mem);
356 }
357
358 address PhaseMacroExpand::basictype2arraycopy(BasicType t,
359 Node* src_offset,
414 // }
415 // }
416 // // adjust params for remaining work:
417 // if (slowval != -1) {
418 // n = -1^slowval; src_offset += n; dest_offset += n; length -= n
419 // }
420 // slow_region:
421 // call slow arraycopy(src, src_offset, dest, dest_offset, length)
422 // return // via slow_call_path
423 //
424 // This routine is used from several intrinsics: System.arraycopy,
425 // Object.clone (the array subcase), and Arrays.copyOf[Range].
426 //
427 Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
428 Node** ctrl, MergeMemNode* mem, Node** io,
429 const TypePtr* adr_type,
430 BasicType basic_elem_type,
431 Node* src, Node* src_offset,
432 Node* dest, Node* dest_offset,
433 Node* copy_length,
434 Node* dest_length,
435 bool disjoint_bases,
436 bool length_never_negative,
437 RegionNode* slow_region) {
438 if (slow_region == nullptr) {
439 slow_region = new RegionNode(1);
440 transform_later(slow_region);
441 }
442
443 Node* original_dest = dest;
444 bool dest_needs_zeroing = false;
445 bool acopy_to_uninitialized = false;
446 Node* init_value = nullptr;
447 Node* raw_init_value = nullptr;
448
449 // See if this is the initialization of a newly-allocated array.
450 // If so, we will take responsibility here for initializing it to zero.
451 // (Note: Because tightly_coupled_allocation performs checks on the
452 // out-edges of the dest, we need to avoid making derived pointers
453 // from it until we have checked its uses.)
454 if (ReduceBulkZeroing
455 && !(UseTLAB && ZeroTLAB) // pointless if already zeroed
456 && basic_elem_type != T_CONFLICT // avoid corner case
457 && !src->eqv_uncast(dest)
458 && alloc != nullptr
459 && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) {
460 assert(ac->is_alloc_tightly_coupled(), "sanity");
461 // acopy to uninitialized tightly coupled allocations
462 // needs zeroing outside the copy range
463 // and the acopy itself will be to uninitialized memory
464 acopy_to_uninitialized = true;
465 if (alloc->maybe_set_complete(&_igvn)) {
466 // "You break it, you buy it."
467 InitializeNode* init = alloc->initialization();
468 assert(init->is_complete(), "we just did this");
469 init->set_complete_with_arraycopy();
470 assert(dest->is_CheckCastPP(), "sanity");
471 assert(dest->in(0)->in(0) == init, "dest pinned");
472 adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory
473 // From this point on, every exit path is responsible for
474 // initializing any non-copied parts of the object to zero.
475 // Also, if this flag is set we make sure that arraycopy interacts properly
476 // with G1, eliding pre-barriers. See CR 6627983.
477 dest_needs_zeroing = true;
478 init_value = alloc->in(AllocateNode::InitValue);
479 raw_init_value = alloc->in(AllocateNode::RawInitValue);
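// If the allocation carries a default element value, any uncopied portion
// must be initialized with that value rather than with zero bits.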
480 } else {
481 // dest_needs_zeroing = false;
482 }
483 } else {
484 // No zeroing elimination needed here.
485 alloc = nullptr;
486 acopy_to_uninitialized = false;
487 //original_dest = dest;
488 //dest_needs_zeroing = false;
489 }
490
491 uint alias_idx = C->get_alias_index(adr_type);
492
493 // Results are placed here:
494 enum { fast_path = 1, // normal void-returning assembly stub
495 checked_path = 2, // special assembly stub with cleanup
496 slow_call_path = 3, // something went wrong; call the VM
497 zero_path = 4, // bypass when length of copy is zero
498 bcopy_path = 5, // copy primitive array by 64-bit blocks
499 PATH_LIMIT = 6
529 checked_i_o = *io;
530 checked_mem = mem->memory_at(alias_idx);
531 checked_value = cv;
532 *ctrl = top();
533 }
534
535 Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
536 if (not_pos != nullptr) {
537 Node* local_ctrl = not_pos, *local_io = *io;
538 MergeMemNode* local_mem = MergeMemNode::make(mem);
539 transform_later(local_mem);
540
541 // (6) length must not be negative.
542 if (!length_never_negative) {
543 generate_negative_guard(&local_ctrl, copy_length, slow_region);
544 }
545
546 // copy_length is 0.
547 if (dest_needs_zeroing) {
548 assert(!local_ctrl->is_top(), "no ctrl?");
549 if (copy_length->eqv_uncast(dest_length)
550 || _igvn.find_int_con(dest_length, 1) <= 0) {
551 // There is no zeroing to do. No need for a secondary raw memory barrier.
552 } else {
553 // Clear the whole thing since there are no source elements to copy.
554 generate_clear_array(local_ctrl, local_mem,
555 adr_type, dest,
556 init_value, raw_init_value,
557 basic_elem_type,
558 intcon(0), nullptr,
559 alloc->in(AllocateNode::AllocSize));
560 // Use a secondary InitializeNode as raw memory barrier.
561 // Currently it is needed only on this path since other
562 // paths have stub or runtime calls as raw memory barriers.
563 MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
564 Compile::AliasIdxRaw,
565 top());
566 transform_later(mb);
567 mb->set_req(TypeFunc::Control,local_ctrl);
568 mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
569 local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
570 local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));
571
572 InitializeNode* init = mb->as_Initialize();
573 init->set_complete(&_igvn); // (there is no corresponding AllocateNode)
574 }
575 }
576
577 // Present the results of the fast call.
578 result_region->init_req(zero_path, local_ctrl);
579 result_i_o ->init_req(zero_path, local_io);
580 result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
581 }
582
583 if (!(*ctrl)->is_top() && dest_needs_zeroing) {
584 // We have to initialize the *uncopied* part of the array to zero.
585 // The copy destination is the slice dest[off..off+len]. The other slices
586 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
587 Node* dest_size = alloc->in(AllocateNode::AllocSize);
588 Node* dest_tail = transform_later( new AddINode(dest_offset, copy_length));
589
590 // If there is a head section that needs zeroing, do it now.
591 if (_igvn.find_int_con(dest_offset, -1) != 0) {
592 generate_clear_array(*ctrl, mem,
593 adr_type, dest,
594 init_value, raw_init_value,
595 basic_elem_type,
596 intcon(0), dest_offset,
597 nullptr);
598 }
599
600 // Next, perform a dynamic check on the tail length.
601 // It is often zero, and we can win big if we prove this.
602 // There are two wins: Avoid generating the ClearArray
603 // with its attendant messy index arithmetic, and upgrade
604 // the copy to a more hardware-friendly word size of 64 bits.
605 Node* tail_ctl = nullptr;
606 if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
607 Node* cmp_lt = transform_later( new CmpINode(dest_tail, dest_length) );
608 Node* bol_lt = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
609 tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr);
610 assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome");
611 }
612
613 // At this point, let's assume there is no tail.
614 if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) {
615 // There is no tail. Try an upgrade to a 64-bit copy.
624 src, src_offset, dest, dest_offset,
625 dest_size, acopy_to_uninitialized);
626 if (didit) {
627 // Present the results of the block-copying fast call.
628 result_region->init_req(bcopy_path, local_ctrl);
629 result_i_o ->init_req(bcopy_path, local_io);
630 result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
631 }
632 }
633 if (didit) {
634 *ctrl = top(); // no regular fast path
635 }
636 }
637
638 // Clear the tail, if any.
639 if (tail_ctl != nullptr) {
640 Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl;
641 *ctrl = tail_ctl;
642 if (notail_ctl == nullptr) {
643 generate_clear_array(*ctrl, mem,
644 adr_type, dest,
645 init_value, raw_init_value,
646 basic_elem_type,
647 dest_tail, nullptr,
648 dest_size);
649 } else {
650 // Make a local merge.
651 Node* done_ctl = transform_later(new RegionNode(3));
652 Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
653 done_ctl->init_req(1, notail_ctl);
654 done_mem->init_req(1, mem->memory_at(alias_idx));
655 generate_clear_array(*ctrl, mem,
656 adr_type, dest,
657 init_value, raw_init_value,
658 basic_elem_type,
659 dest_tail, nullptr,
660 dest_size);
661 done_ctl->init_req(2, *ctrl);
662 done_mem->init_req(2, mem->memory_at(alias_idx));
663 *ctrl = done_ctl;
664 mem->set_memory_at(alias_idx, done_mem);
665 }
666 }
667 }
668
669 BasicType copy_type = basic_elem_type;
670 assert(basic_elem_type != T_ARRAY, "caller must fix this");
671 if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
672 // If src and dest have compatible element types, we can copy bits.
673 // Types S[] and D[] are compatible if D is a supertype of S.
674 //
675 // If they are not, we will use checked_oop_disjoint_arraycopy,
676 // which performs a fast optimistic per-oop check, and backs off
677 // further to JVM_ArrayCopy on the first per-oop check that fails.
678 // (Actually, we don't move raw bits only; the GC requires card marks.)
815 Node* length_minus = new SubINode(copy_length, slow_offset);
816 transform_later(length_minus);
817
818 // Tweak the node variables to adjust the code produced below:
819 src_offset = src_off_plus;
820 dest_offset = dest_off_plus;
821 copy_length = length_minus;
822 }
823 }
824 *ctrl = slow_control;
825 if (!(*ctrl)->is_top()) {
826 Node* local_ctrl = *ctrl, *local_io = slow_i_o;
827 MergeMemNode* local_mem = MergeMemNode::make(mem);
828 transform_later(local_mem);
829
830 // Generate the slow path, if needed.
831 local_mem->set_memory_at(alias_idx, slow_mem);
832
833 if (dest_needs_zeroing) {
834 generate_clear_array(local_ctrl, local_mem,
835 adr_type, dest,
836 init_value, raw_init_value,
837 basic_elem_type,
838 intcon(0), nullptr,
839 alloc->in(AllocateNode::AllocSize));
840 }
841
842 local_mem = generate_slow_arraycopy(ac,
843 &local_ctrl, local_mem, &local_io,
844 adr_type,
845 src, src_offset, dest, dest_offset,
846 copy_length, /*dest_uninitialized*/false);
847
848 result_region->init_req(slow_call_path, local_ctrl);
849 result_i_o ->init_req(slow_call_path, local_io);
850 result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
851 } else {
852 ShouldNotReachHere(); // no call to generate_slow_arraycopy:
853 // projections were not extracted
854 }
855
856 // Remove unused edges.
857 for (uint i = 1; i < result_region->req(); i++) {
886 // a subsequent store that would make this object accessible by
887 // other threads.
888 assert(ac->_dest_type == TypeOopPtr::BOTTOM, "non-escaping destination shouldn't have a narrow slice");
889 insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore, Compile::AliasIdxBot);
890 } else {
891 int mem_bar_alias_idx = Compile::AliasIdxBot;
892 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
893 // The graph was transformed under the assumption the ArrayCopy node only had an effect on a narrow slice. We can't
894 // insert a wide membar now that it's being expanded: a load that uses the input memory state of the ArrayCopy
895 // could then become anti dependent on the membar when it was not anti dependent on the ArrayCopy leading to a
896 // broken graph.
897 mem_bar_alias_idx = C->get_alias_index(ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr());
898 }
899 insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder, mem_bar_alias_idx);
900 }
901
902 assert((*ctrl)->is_Proj(), "MemBar control projection");
903 assert((*ctrl)->in(0)->isa_MemBar(), "MemBar node");
904 (*ctrl)->in(0)->isa_MemBar()->set_trailing_expanded_array_copy();
905
906 _igvn.replace_node(_callprojs->fallthrough_memproj, out_mem);
907 if (_callprojs->fallthrough_ioproj != nullptr) {
908 _igvn.replace_node(_callprojs->fallthrough_ioproj, *io);
909 }
910 _igvn.replace_node(_callprojs->fallthrough_catchproj, *ctrl);
911
912 #ifdef ASSERT
913 const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
914 if (dest_t->is_known_instance()) {
915 ArrayCopyNode* ac = nullptr;
916 assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
917 assert(ac == nullptr, "no arraycopy anymore");
918 }
919 #endif
920
921 return out_mem;
922 }
923
924 // Helper for initialization of arrays, creating a ClearArray.
925 // It writes zero bits (or the array's init value) in [start..end), within the body of an array object.
926 // The memory effects are all chained onto the 'adr_type' alias category.
927 //
928 // Since the object is otherwise uninitialized, we are free
929 // to put a little "slop" around the edges of the cleared area,
930 // as long as it does not go back into the array's header,
931 // or beyond the array end within the heap.
932 //
933 // The lower edge can be rounded down to the nearest jint and the
934 // upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
935 //
936 // Arguments:
937 // adr_type memory slice where writes are generated
938 // dest oop of the destination array
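//   val initial value for uncopied elements, if any (null means zero bits)
//   raw_val raw-memory encoding of val, if any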
939 // basic_elem_type element type of the destination
940 // slice_idx array index of first element to store
941 // slice_len number of elements to store (or null)
942 // dest_size total size in bytes of the array object
943 //
944 // Exactly one of slice_len or dest_size must be non-null.
945 // If dest_size is non-null, zeroing extends to the end of the object.
946 // If slice_len is non-null, the slice_idx value must be a constant.
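// For example, with a 16-byte array header (assuming abase == 16) and a
// short[] (scale == 1), clearing elements [3,7) rounds the start down from
// byte offset 22 to 20 and the end up from 30 to 32; the extra bytes lie in
// the still-uninitialized object, so the slop is harmless.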
947 void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
948 const TypePtr* adr_type,
949 Node* dest,
950 Node* val,
951 Node* raw_val,
952 BasicType basic_elem_type,
953 Node* slice_idx,
954 Node* slice_len,
955 Node* dest_size) {
956 // one or the other but not both of slice_len and dest_size:
957 assert((slice_len != nullptr? 1: 0) + (dest_size != nullptr? 1: 0) == 1, "");
958 if (slice_len == nullptr) slice_len = top();
959 if (dest_size == nullptr) dest_size = top();
960
961 uint alias_idx = C->get_alias_index(adr_type);
962
963 // operate on this memory slice:
964 Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on
965
966 // scaling and rounding of indexes:
967 int scale = exact_log2(type2aelembytes(basic_elem_type));
968 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
969 int clear_low = (-1 << scale) & (BytesPerInt - 1);
970 int bump_bit = (-1 << scale) & BytesPerInt;
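// clear_low: mask for rounding the start down to a jint boundary (nonzero
// only for sub-jint elements). bump_bit: set to BytesPerInt for elements no
// larger than a jint, indicating that a non-constant start may need an extra
// jint store just below the next jlong boundary (see the non-constant case
// below).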
971
972 // determine constant starts and ends
973 const intptr_t BIG_NEG = -128;
974 assert(BIG_NEG + 2*abase < 0, "neg enough");
975 intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
976 intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
977 if (slice_len_con == 0) {
978 return; // nothing to do here
979 }
980 intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
981 intptr_t end_con = _igvn.find_intptr_t_con(dest_size, -1);
982 if (slice_idx_con >= 0 && slice_len_con >= 0) {
983 assert(end_con < 0, "not two cons");
984 end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
985 BytesPerLong);
986 }
987
988 if (start_con >= 0 && end_con >= 0) {
989 // Constant start and end. Simple.
990 mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
991 start_con, end_con, &_igvn);
992 } else if (start_con >= 0 && dest_size != top()) {
993 // Constant start, pre-rounded end after the tail of the array.
994 Node* end = dest_size;
995 mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
996 start_con, end, &_igvn);
997 } else if (start_con >= 0 && slice_len != top()) {
998 // Constant start, non-constant end. End needs rounding up.
999 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
1000 intptr_t end_base = abase + (slice_idx_con << scale);
1001 int end_round = (-1 << scale) & (BytesPerLong - 1);
1002 Node* end = ConvI2X(slice_len);
1003 if (scale != 0)
1004 end = transform_later(new LShiftXNode(end, intcon(scale) ));
1005 end_base += end_round;
1006 end = transform_later(new AddXNode(end, MakeConX(end_base)) );
1007 end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
1008 mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
1009 start_con, end, &_igvn);
1010 } else if (start_con < 0 && dest_size != top()) {
1011 // Non-constant start, pre-rounded end after the tail of the array.
1012 // This is almost certainly a "round-to-end" operation.
1013 Node* start = slice_idx;
1014 start = ConvI2X(start);
1015 if (scale != 0)
1016 start = transform_later(new LShiftXNode( start, intcon(scale) ));
1017 start = transform_later(new AddXNode(start, MakeConX(abase)) );
1018 if ((bump_bit | clear_low) != 0) {
1019 int to_clear = (bump_bit | clear_low);
1020 // Align up mod 8, then store a jint zero unconditionally
1021 // just before the mod-8 boundary.
1022 if (((abase + bump_bit) & ~to_clear) - bump_bit
1023 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
1024 bump_bit = 0;
1025 assert((abase & to_clear) == 0, "array base must be long-aligned");
1026 } else {
1027 // Bump 'start' up to (or past) the next jint boundary:
1028 start = transform_later( new AddXNode(start, MakeConX(bump_bit)) );
1029 assert((abase & clear_low) == 0, "array base must be int-aligned");
1030 }
1031 // Round bumped 'start' down to jlong boundary in body of array.
1032 start = transform_later(new AndXNode(start, MakeConX(~to_clear)) );
1033 if (bump_bit != 0) {
1034 // Store a zero to the immediately preceding jint:
1035 Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) );
1036 Node* p1 = basic_plus_adr(dest, x1);
1037 if (val == nullptr) {
1038          assert(raw_val == nullptr, "raw_val must be null when val is null");
1039 mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
1040 } else {
1041 assert(_igvn.type(val)->isa_narrowoop(), "should be narrow oop");
1042 mem = new StoreNNode(ctrl, mem, p1, adr_type, val, MemNode::unordered);
1043 }
1044 mem = transform_later(mem);
1045 }
1046 }
1047 Node* end = dest_size; // pre-rounded
1048 mem = ClearArrayNode::clear_memory(ctrl, mem, dest, raw_val,
1049 start, end, &_igvn);
1050 } else {
1051 // Non-constant start, unrounded non-constant end.
1052 // (Nobody zeroes a random midsection of an array using this routine.)
1053 ShouldNotReachHere(); // fix caller
1054 }
1055
1056 // Done.
1057 merge_mem->set_memory_at(alias_idx, mem);
1058 }
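// Illustrative (hypothetical) use: zeroing elements [2, 2+len) of an int
// array, with 'len' a non-constant node and 'merge_mem' already split out:
//
//   generate_clear_array(ctrl, merge_mem, adr_type, dest,
//                        nullptr, nullptr,   // plain zeroing, no default value
//                        T_INT, intcon(2), len, nullptr);
//
// Note that slice_idx must be a constant because slice_len is non-null.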
1059
1060 bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
1061 const TypePtr* adr_type,
1062 BasicType basic_elem_type,
1063 AllocateNode* alloc,
1064 Node* src, Node* src_offset,
1065 Node* dest, Node* dest_offset,
1066 Node* dest_size, bool dest_uninitialized) {
1067 // See if there is an advantage from block transfer.
1068 int scale = exact_log2(type2aelembytes(basic_elem_type));
1144 const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
1145 CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
1146 "slow_arraycopy", TypePtr::BOTTOM);
1147
1148 call->init_req(TypeFunc::Control, *ctrl);
1149 call->init_req(TypeFunc::I_O , *io);
1150 call->init_req(TypeFunc::Memory , mem);
1151 call->init_req(TypeFunc::ReturnAdr, top());
1152 call->init_req(TypeFunc::FramePtr, top());
1153 call->init_req(TypeFunc::Parms+0, src);
1154 call->init_req(TypeFunc::Parms+1, src_offset);
1155 call->init_req(TypeFunc::Parms+2, dest);
1156 call->init_req(TypeFunc::Parms+3, dest_offset);
1157 call->init_req(TypeFunc::Parms+4, copy_length);
1158 call->copy_call_debug_info(&_igvn, ac);
1159
1160 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
1161 _igvn.replace_node(ac, call);
1162 transform_later(call);
1163
1164 _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
1165 *ctrl = _callprojs->fallthrough_catchproj->clone();
1166 transform_later(*ctrl);
1167
1168 Node* m = _callprojs->fallthrough_memproj->clone();
1169 transform_later(m);
1170
1171 uint alias_idx = C->get_alias_index(adr_type);
1172 MergeMemNode* out_mem;
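  // If the call was issued on a specific memory slice, fold its memory
  // projection back into that slice of the incoming state; a bottom alias
  // index means the call clobbers all of memory.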
1173 if (alias_idx != Compile::AliasIdxBot) {
1174 out_mem = MergeMemNode::make(mem);
1175 out_mem->set_memory_at(alias_idx, m);
1176 } else {
1177 out_mem = MergeMemNode::make(m);
1178 }
1179 transform_later(out_mem);
1180
1181   // When src is negative and the arraycopy sits in front of an infinite loop, _callprojs->fallthrough_ioproj
1182   // can be nullptr. In that case, skip the clone and pass the nullptr i/o projection through.
1183 if (_callprojs->fallthrough_ioproj != nullptr) {
1184 *io = _callprojs->fallthrough_ioproj->clone();
1185 transform_later(*io);
1186 } else {
1187 *io = nullptr;
1188 }
1189
1190 return out_mem;
1191 }
1192
1193 // Helper function; generates code for cases requiring runtime checks.
1194 Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
1195 const TypePtr* adr_type,
1196 Node* dest_elem_klass,
1197 Node* src, Node* src_offset,
1198 Node* dest, Node* dest_offset,
1199 Node* copy_length, bool dest_uninitialized) {
1200 if ((*ctrl)->is_top()) return nullptr;
1201
1202 address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
1203 if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
1204 return nullptr;
1296
1297   // Connect the remaining edges for exit_block coming from stub_block.
1298 if (exit_block) {
1299 exit_block->init_req(2, *ctrl);
1300
1301 // Memory edge corresponding to stub_region.
1302 result_memory->init_req(2, *mem);
1303
1304 uint alias_idx = C->get_alias_index(adr_type);
1305 if (alias_idx != Compile::AliasIdxBot) {
1306 *mem = MergeMemNode::make(*mem);
1307 (*mem)->set_memory_at(alias_idx, result_memory);
1308 } else {
1309 *mem = MergeMemNode::make(result_memory);
1310 }
1311 transform_later(*mem);
1312 *ctrl = exit_block;
1313 }
1314 }
1315
1316 const TypePtr* PhaseMacroExpand::adjust_for_flat_array(const TypeAryPtr* top_dest, Node*& src_offset,
1317 Node*& dest_offset, Node*& length, BasicType& dest_elem,
1318 Node*& dest_length) {
1319 #ifdef ASSERT
1320 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1321 bool needs_barriers = top_dest->elem()->inline_klass()->contains_oops() &&
1322 bs->array_copy_requires_gc_barriers(dest_length != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization);
1323   assert(!needs_barriers || StressReflectiveCode, "Flat arraycopy would require GC barriers");
1324 #endif
1325 int elem_size = top_dest->flat_elem_size();
1326 if (elem_size >= 8) {
1327 if (elem_size > 8) {
1328 // treat as array of long but scale length, src offset and dest offset
1329 assert((elem_size % 8) == 0, "not a power of 2?");
1330 int factor = elem_size / 8;
1331 length = transform_later(new MulINode(length, intcon(factor)));
1332 src_offset = transform_later(new MulINode(src_offset, intcon(factor)));
1333 dest_offset = transform_later(new MulINode(dest_offset, intcon(factor)));
1334 if (dest_length != nullptr) {
1335 dest_length = transform_later(new MulINode(dest_length, intcon(factor)));
1336 }
1337 elem_size = 8;
1338 }
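    // E.g., a 16-byte flat element is copied as factor == 2 longs per element:
    // length, offsets, and dest_length are all doubled.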
1339 dest_elem = T_LONG;
1340 } else if (elem_size == 4) {
1341 dest_elem = T_INT;
1342 } else if (elem_size == 2) {
1343 dest_elem = T_CHAR;
1344 } else if (elem_size == 1) {
1345 dest_elem = T_BYTE;
1346 } else {
1347 ShouldNotReachHere();
1348 }
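  // A flat element spans multiple fields, so the copy is expressed on the raw
  // memory slice rather than on a single element slice.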
1349 return TypeRawPtr::BOTTOM;
1350 }
1351
1352 #undef XTOP
1353
1354 void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
1355 Node* ctrl = ac->in(TypeFunc::Control);
1356 Node* io = ac->in(TypeFunc::I_O);
1357 Node* src = ac->in(ArrayCopyNode::Src);
1358 Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
1359 Node* dest = ac->in(ArrayCopyNode::Dest);
1360 Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
1361 Node* length = ac->in(ArrayCopyNode::Length);
1362 MergeMemNode* merge_mem = nullptr;
1363
1364 if (ac->is_clonebasic()) {
1365 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1366 bs->clone_at_expansion(this, ac);
1367 return;
1368 } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_clone_oop_array()) {
1369 const Type* src_type = _igvn.type(src);
1370 const Type* dest_type = _igvn.type(dest);
1371 const TypeAryPtr* top_src = src_type->isa_aryptr();
1372     // Note: The destination can have type Object (i.e. non-array) when Object::clone() is invoked directly,
1373     // via reflection, on a variable declared as Object that holds an array at runtime. top_dest is then null.
1374 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
1375 BasicType dest_elem = T_OBJECT;
1376 if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
1377 dest_elem = top_dest->elem()->array_element_basic_type();
1378 }
1379 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
1380
1381 if (top_src != nullptr && top_src->is_flat()) {
1382 // If src is flat, dest is guaranteed to be flat as well
1383 top_dest = top_src;
1384 }
1385
1386 AllocateArrayNode* alloc = nullptr;
1387 Node* dest_length = nullptr;
1388 if (ac->is_alloc_tightly_coupled()) {
1389 alloc = AllocateArrayNode::Ideal_array_allocation(dest);
1390 assert(alloc != nullptr, "expect alloc");
1391 dest_length = alloc->in(AllocateNode::ALength);
1392 }
1393
1394 Node* mem = ac->in(TypeFunc::Memory);
1395 const TypePtr* adr_type = nullptr;
1396 if (top_dest != nullptr && top_dest->is_flat()) {
1397 assert(dest_length != nullptr || StressReflectiveCode, "must be tightly coupled");
1398 // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
1399 // on all slices to prevent writes into the source from floating below the arraycopy.
1400 int mem_bar_alias_idx = Compile::AliasIdxBot;
1401 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1402 mem_bar_alias_idx = C->get_alias_index(ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr());
1403 }
1404 insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder, mem_bar_alias_idx);
1405 adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
1406 } else {
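      // Non-flat case: attribute the writes to the destination's memory slice.
      // Prefer the precise _dest_type recorded on the node; if source and
      // destination types disagree, fall back to raw memory.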
1407 adr_type = dest_type->is_oopptr()->add_offset(Type::OffsetBot);
1408 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1409 adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
1410 }
1411 if (ac->_src_type != ac->_dest_type) {
1412 adr_type = TypeRawPtr::BOTTOM;
1413 }
1414 }
1415 merge_mem = MergeMemNode::make(mem);
1416 transform_later(merge_mem);
1417
1418 generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
1419 adr_type, dest_elem,
1420 src, src_offset, dest, dest_offset, length,
1421 dest_length,
1422 true, ac->has_negative_length_guard());
1423
1424 return;
1425 }
1426
1427 AllocateArrayNode* alloc = nullptr;
1428 if (ac->is_alloc_tightly_coupled()) {
1429 alloc = AllocateArrayNode::Ideal_array_allocation(dest);
1430 assert(alloc != nullptr, "expect alloc");
1431 }
1432
1433 assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");
1434
1435 // Compile time checks. If any of these checks cannot be verified at compile time,
1436 // we do not make a fast path for this call. Instead, we let the call remain as it
1437 // is. The checks we choose to mandate at compile time are:
1438 //
1439 // (1) src and dest are arrays.
1440 const Type* src_type = src->Value(&_igvn);
1441 const Type* dest_type = dest->Value(&_igvn);
1442 const TypeAryPtr* top_src = src_type->isa_aryptr();
1443 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
1444
1445 BasicType src_elem = T_CONFLICT;
1446 BasicType dest_elem = T_CONFLICT;
1447
1448 if (top_src != nullptr && top_src->elem() != Type::BOTTOM) {
1449 src_elem = top_src->elem()->array_element_basic_type();
1450 }
1451 if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
1452 dest_elem = top_dest->elem()->array_element_basic_type();
1453 }
1454 if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
1455 if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
1456
1457 if (ac->is_arraycopy_validated() && dest_elem != T_CONFLICT && src_elem == T_CONFLICT) {
1458 src_elem = dest_elem;
1459 }
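  // A validated arraycopy is known to have matching element types (see the
  // assert below), so the destination type can stand in for an unknown source.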
1460
1461 if (src_elem == T_CONFLICT || dest_elem == T_CONFLICT) {
1462 // Conservatively insert a memory barrier on all memory slices.
1463 // Do not let writes into the source float below the arraycopy.
1464 {
1465 Node* mem = ac->in(TypeFunc::Memory);
1466 insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder, Compile::AliasIdxBot);
1467
1468 merge_mem = MergeMemNode::make(mem);
1469 transform_later(merge_mem);
1470 }
1471
1472 // Call StubRoutines::generic_arraycopy stub.
1473 generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io,
1474 TypeRawPtr::BOTTOM, T_CONFLICT,
1475 src, src_offset, dest, dest_offset, length,
1476 nullptr,
1477 // If a negative length guard was generated for the ArrayCopyNode,
1478 // the length of the array can never be negative.
1479 false, ac->has_negative_length_guard());
1480 return;
1481 }
1482
1483 assert(!ac->is_arraycopy_validated() || (src_elem == dest_elem && dest_elem != T_VOID), "validated but different basic types");
1484
1485 // (2) src and dest arrays must have elements of the same BasicType
1486 // Figure out the size and type of the elements we will be copying.
1487 //
1488 // We have no stub to copy flat inline type arrays with oop
1489 // fields if we need to emit write barriers.
1490 //
1491 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1492 if (src_elem != dest_elem || top_src->is_flat() != top_dest->is_flat() || dest_elem == T_VOID ||
1493 (top_src->is_flat() && top_dest->elem()->inline_klass()->contains_oops() &&
1494 bs->array_copy_requires_gc_barriers(alloc != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization))) {
1495 // The component types are not the same or are not recognized. Punt.
1496 // (But, avoid the native method wrapper to JVM_ArrayCopy.)
1497 {
1498 Node* mem = ac->in(TypeFunc::Memory);
1499 merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
1500 }
1501
1502 _igvn.replace_node(_callprojs->fallthrough_memproj, merge_mem);
1503 if (_callprojs->fallthrough_ioproj != nullptr) {
1504 _igvn.replace_node(_callprojs->fallthrough_ioproj, io);
1505 }
1506 _igvn.replace_node(_callprojs->fallthrough_catchproj, ctrl);
1507 return;
1508 }
1509
1510 //---------------------------------------------------------------------------
1511 // We will make a fast path for this call to arraycopy.
1512
1513 // We have the following tests left to perform:
1514 //
1515 // (3) src and dest must not be null.
1516 // (4) src_offset must not be negative.
1517 // (5) dest_offset must not be negative.
1518 // (6) length must not be negative.
1519 // (7) src_offset + length must not exceed length of src.
1520 // (8) dest_offset + length must not exceed length of dest.
1521 // (9) each element of an oop array must be assignable
1522
1523 Node* mem = ac->in(TypeFunc::Memory);
1524 if (top_dest->is_flat()) {
1525 // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
1526 // on all slices to prevent writes into the source from floating below the arraycopy.
1527 int mem_bar_alias_idx = Compile::AliasIdxBot;
1528 if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1529 mem_bar_alias_idx = C->get_alias_index(ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr());
1530 }
1531 insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder, mem_bar_alias_idx);
1532 }
1533 merge_mem = MergeMemNode::make(mem);
1534 transform_later(merge_mem);
1535
1536 RegionNode* slow_region = new RegionNode(1);
1537 transform_later(slow_region);
1538
1539 if (!ac->is_arraycopy_validated()) {
1540 // (3) operands must not be null
1541 // We currently perform our null checks with the null_check routine.
1542 // This means that the null exceptions will be reported in the caller
1543 // rather than (correctly) reported inside of the native arraycopy call.
1544 // This should be corrected, given time. We do our null check with the
1545 // stack pointer restored.
1546     // The null checks are done in library_call.cpp.
1547
1548 // (4) src_offset must not be negative.
1549 generate_negative_guard(&ctrl, src_offset, slow_region);
1550
1551 // (5) dest_offset must not be negative.
1552 generate_negative_guard(&ctrl, dest_offset, slow_region);
1553
1554 // (6) length must not be negative (moved to generate_arraycopy()).
1555 // generate_negative_guard(length, slow_region);
1556
1557 // (7) src_offset + length must not exceed length of src.
1558 Node* alen = ac->in(ArrayCopyNode::SrcLen);
1559 assert(alen != nullptr, "need src len");
1560 generate_limit_guard(&ctrl,
1561 src_offset, length,
1562 alen,
1563 slow_region);
1564
1565 // (8) dest_offset + length must not exceed length of dest.
1566 alen = ac->in(ArrayCopyNode::DestLen);
1567 assert(alen != nullptr, "need dest len");
1568 generate_limit_guard(&ctrl,
1569 dest_offset, length,
1570 alen,
1571 slow_region);
1572
1573 // (9) each element of an oop array must be assignable
1574 // The generate_arraycopy subroutine checks this.
1575
1576     // TODO 8350865 Fix the logic below. Also handle atomicity.
1577     // Care is needed here because 'adjust_for_flat_array' adjusts offsets/length etc., which then no longer match what the slow call to SharedRuntime::slow_arraycopy_C expects.
1578 if (!(top_src->is_flat() && top_dest->is_flat())) {
1579 generate_flat_array_guard(&ctrl, src, merge_mem, slow_region);
1580 generate_flat_array_guard(&ctrl, dest, merge_mem, slow_region);
1581 }
1582
1583 // Handle inline type arrays
1584 if (!top_src->is_flat()) {
1585 if (UseArrayFlattening && !top_src->is_not_flat()) {
1586 // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
1587 generate_flat_array_guard(&ctrl, src, merge_mem, slow_region);
1588 }
1589 if (EnableValhalla) {
1590 // No validation. The subtype check emitted at macro expansion time will not go to the slow
1591       // path but call checkcast_arraycopy, which cannot handle flat/null-free inline type arrays.
1592 generate_null_free_array_guard(&ctrl, dest, merge_mem, slow_region);
1593 }
1594 } else {
1595 assert(top_dest->is_flat(), "dest array must be flat");
1596 }
1597 }
1598
1599 // This is where the memory effects are placed:
1600 const TypePtr* adr_type = nullptr;
1601 Node* dest_length = (alloc != nullptr) ? alloc->in(AllocateNode::ALength) : nullptr;
1602
1603 if (top_src->is_flat() && top_dest->is_flat()) {
1604 adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
1605 } else if (ac->_dest_type != TypeOopPtr::BOTTOM) {
1606 adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
1607 } else {
1608 adr_type = TypeAryPtr::get_array_body_type(dest_elem);
1609 }
1610
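  // adr_type selects the slice that carries the copy's memory effects: raw for
  // flat arrays (set by adjust_for_flat_array), the node's recorded _dest_type
  // when available, otherwise the canonical array-body slice for dest_elem.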
1611 generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
1612 adr_type, dest_elem,
1613 src, src_offset, dest, dest_offset, length,
1614 dest_length,
1615 // If a negative length guard was generated for the ArrayCopyNode,
1616 // the length of the array can never be negative.
1617 false, ac->has_negative_length_guard(),
1618 slow_region);
1619 }
1620 }