/*
 * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/convertnode.hpp"
#include "opto/vectornode.hpp"
#include "opto/graphKit.hpp"
#include "opto/macro.hpp"
#include "opto/runtime.hpp"
#include "opto/castnode.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, *ctrl);
  mb->init_req(TypeFunc::Memory, *mem);
  transform_later(mb);
  *ctrl = new ProjNode(mb, TypeFunc::Control);
  transform_later(*ctrl);
  Node* mem_proj = new ProjNode(mb, TypeFunc::Memory);
  transform_later(mem_proj);
  *mem = mem_proj;
}

Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  Node* base = basic_plus_adr(ary, header);
#ifdef _LP64
  // see comment in GraphKit::array_element_address
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  idx = transform_later(new ConvI2LNode(idx, lidxtype));
#endif
  Node* scale = new LShiftXNode(idx, intcon(shift));
  transform_later(scale);
  return basic_plus_adr(ary, base, scale);
}

Node* PhaseMacroExpand::ConvI2L(Node* offset) {
  return transform_later(new ConvI2LNode(offset));
}

Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
                                       const TypeFunc* call_type, address call_addr,
                                       const char* call_name,
                                       const TypePtr* adr_type,
                                       Node* parm0, Node* parm1,
                                       Node* parm2, Node* parm3,
                                       Node* parm4, Node* parm5,
                                       Node* parm6, Node* parm7) {
  Node* call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O    , top());
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());

  // Hook each parm in order.  Stop looking at the first null.
  if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7);
  /* close each nested if ===> */ } } } } } } } }
  assert(call->in(call->req()-1) != nullptr, "must initialize all parms");

  return call;
}


//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
  if ((*ctrl)->is_top()) {
    // Already short circuited.
    return nullptr;
  }
  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_igvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return nullptr;
  }

  IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
  transform_later(iff);

  Node* if_slow = new IfTrueNode(iff);
  transform_later(if_slow);

  if (region != nullptr) {
    region->add_req(if_slow);
  }

  Node* if_fast = new IfFalseNode(iff);
  transform_later(if_fast);

  *ctrl = if_fast;

  return if_slow;
}

Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
}

inline Node* PhaseMacroExpand::generate_fair_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_FAIR);
}

void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return;                // index is already adequately typed
  Node* cmp_lt = new CmpINode(index, intcon(0));
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset) {      // last += offset
    last = new AddINode(last, offset);
    transform_later(last);
  }
  Node* cmp_lt = new CmpUNode(array_length, last);
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

//
// Partial inlining handling for smaller conjoint/disjoint array copies having
// length (in bytes) less than ArrayOperationPartialInlineSize.
//  if (length <= ArrayOperationPartialInlineSize) {
//    partial_inlining_block:
//      mask = Mask_Gen
//      vload = LoadVectorMasked src, mask
//      StoreVectorMasked dst, mask, vload
//  } else {
//    stub_block:
//      callstub array_copy
//  }
//  exit_block:
//    Phi = label partial_inlining_block:mem, label stub_block:mem (filled by caller)
//    mem = MergeMem (Phi)
//    control = stub_block
//
// Exit_block and associated phi(memory) are partially initialized for the
// partial_inlining_block edges. Remaining edges for exit_block coming from
// stub_block are connected by the caller after stub node creation.
//

void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type,
                                                       RegionNode** exit_block, Node** result_memory, Node* length,
                                                       Node* src_start, Node* dst_start, BasicType type) {
  const TypePtr* src_adr_type = _igvn.type(src_start)->isa_ptr();
  Node* inline_block = nullptr;
  Node* stub_block = nullptr;

  int const_len = -1;
  const TypeInt* lty = nullptr;
  uint shift = exact_log2(type2aelembytes(type));
  if (length->Opcode() == Op_ConvI2L) {
    lty = _igvn.type(length->in(1))->isa_int();
  } else {
    lty = _igvn.type(length)->isa_int();
  }
  if (lty && lty->is_con()) {
    const_len = lty->get_con() << shift;
  }

  // Return if copy length is greater than partial inline size limit or
  // target does not support masked loads/stores.
  int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, const_len);
  if (const_len > ArrayOperationPartialInlineSize ||
      !Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type) ||
      !Matcher::match_rule_supported_vector(Op_StoreVectorMasked, lane_count, type) ||
      !Matcher::match_rule_supported_vector(Op_VectorMaskGen, lane_count, type)) {
    return;
  }

  int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
  Node* casted_length = new CastLLNode(*ctrl, length, TypeLong::make(0, inline_limit, Type::WidenMin));
  transform_later(casted_length);
  Node* copy_bytes = new LShiftXNode(length, intcon(shift));
  transform_later(copy_bytes);

  Node* cmp_le = new CmpULNode(copy_bytes, longcon(ArrayOperationPartialInlineSize));
  transform_later(cmp_le);
  Node* bol_le = new BoolNode(cmp_le, BoolTest::le);
  transform_later(bol_le);
  inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
  stub_block = *ctrl;

  Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
  transform_later(mask_gen);

  unsigned vec_size = lane_count * type2aelembytes(type);
  if (C->max_vector_size() < vec_size) {
    C->set_max_vector_size(vec_size);
  }

  const TypeVect* vt = TypeVect::make(type, lane_count);
  Node* mm = (*mem)->memory_at(C->get_alias_index(src_adr_type));
  Node* masked_load = new LoadVectorMaskedNode(inline_block, mm, src_start,
                                               src_adr_type, vt, mask_gen);
  transform_later(masked_load);

  mm = (*mem)->memory_at(C->get_alias_index(adr_type));
  Node* masked_store = new StoreVectorMaskedNode(inline_block, mm, dst_start,
                                                 masked_load, adr_type, mask_gen);
  transform_later(masked_store);

  // Convergence region for inline_block and stub_block.
  *exit_block = new RegionNode(3);
  transform_later(*exit_block);
  (*exit_block)->init_req(1, inline_block);
  *result_memory = new PhiNode(*exit_block, Type::MEMORY, adr_type);
  transform_later(*result_memory);
  (*result_memory)->init_req(1, masked_store);

  *ctrl = stub_block;
}


Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
  if ((*ctrl)->is_top())  return nullptr;

  if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return nullptr;                // index is already adequately typed
  Node* cmp_le = new CmpINode(index, intcon(0));
  transform_later(cmp_le);
  BoolTest::mask le_or_eq = (never_negative ?
                             BoolTest::eq : BoolTest::le);
  Node* bol_le = new BoolNode(cmp_le, le_or_eq);
  transform_later(bol_le);
  Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN);

  return is_notp;
}

Node* PhaseMacroExpand::array_lh_test(Node* array, jint mask) {
  Node* klass_adr = basic_plus_adr(array, oopDesc::klass_offset_in_bytes());
  Node* klass = transform_later(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
  Node* lh_addr = basic_plus_adr(klass, in_bytes(Klass::layout_helper_offset()));
  Node* lh_val = _igvn.transform(LoadNode::make(_igvn, nullptr, C->immutable_memory(), lh_addr, lh_addr->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
  Node* masked = transform_later(new AndINode(lh_val, intcon(mask)));
  Node* cmp = transform_later(new CmpINode(masked, intcon(0)));
  return transform_later(new BoolNode(cmp, BoolTest::ne));
}

Node* PhaseMacroExpand::generate_flat_array_guard(Node** ctrl, Node* array, RegionNode* region) {
  assert(UseFlatArray, "can never be flat");
  return generate_fair_guard(ctrl, array_lh_test(array, Klass::_lh_array_tag_flat_value_bit_inplace), region);
}

Node* PhaseMacroExpand::generate_null_free_array_guard(Node** ctrl, Node* array, RegionNode* region) {
  assert(EnableValhalla, "can never be null free");
  return generate_fair_guard(ctrl, array_lh_test(array, Klass::_lh_null_free_array_bit_inplace), region);
}

void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
  transform_later(call);

  *ctrl = new ProjNode(call, TypeFunc::Control);
  transform_later(*ctrl);
  Node* newmem = new ProjNode(call, TypeFunc::Memory);
  transform_later(newmem);

  uint alias_idx = C->get_alias_index(adr_type);
  if (alias_idx != Compile::AliasIdxBot) {
    *mem = MergeMemNode::make(*mem);
    (*mem)->set_memory_at(alias_idx, newmem);
  } else {
    *mem = MergeMemNode::make(newmem);
  }
  transform_later(*mem);
}

address PhaseMacroExpand::basictype2arraycopy(BasicType t,
                                              Node* src_offset,
                                              Node* dest_offset,
                                              bool disjoint_bases,
                                              const char* &name,
                                              bool dest_uninitialized) {
  const TypeInt* src_offset_inttype  = _igvn.find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = _igvn.find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // if the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint.)  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint since a low->high copy will work correctly in this case.
  if (src_offset_inttype != nullptr && src_offset_inttype->is_con() &&
      dest_offset_inttype != nullptr && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != nullptr) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}

#define XTOP LP64_ONLY(COMMA top())

// Generate an optimized call to arraycopy.
// Caller must guard against non-arrays.
// Caller must determine a common array basic-type for both arrays.
// Caller must validate offsets against array bounds.
// The slow_region has already collected guard failure paths
// (such as out of bounds length or non-conformable array types).
// The generated code has this shape, in general:
//
//     if (length == 0)  return   // via zero_path
//     slowval = -1
//     if (types unknown) {
//       slowval = call generic copy loop
//       if (slowval == 0)  return  // via checked_path
//     } else if (indexes in bounds) {
//       if ((is object array) && !(array type check)) {
//         slowval = call checked copy loop
//         if (slowval == 0)  return  // via checked_path
//       } else {
//         call bulk copy loop
//         return  // via fast_path
//       }
//     }
//     // adjust params for remaining work:
//     if (slowval != -1) {
//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
//     }
//   slow_region:
//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
//     return  // via slow_call_path
//
// This routine is used from several intrinsics:  System.arraycopy,
// Object.clone (the array subcase), and Arrays.copyOf[Range].
//
Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
                                           Node** ctrl, MergeMemNode* mem, Node** io,
                                           const TypePtr* adr_type,
                                           BasicType basic_elem_type,
                                           Node* src,  Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length,
                                           Node* dest_length,
                                           bool disjoint_bases,
                                           bool length_never_negative,
                                           RegionNode* slow_region) {
  if (slow_region == nullptr) {
    slow_region = new RegionNode(1);
    transform_later(slow_region);
  }

  Node* original_dest = dest;
  bool  dest_needs_zeroing = false;
  bool  acopy_to_uninitialized = false;
  Node* default_value = nullptr;
  Node* raw_default_value = nullptr;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
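  // The zeroing elision below is also skipped when src and dest are the same
  // array, or when the allocation length is a constant <= 0 (nothing to zero).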
  if (ReduceBulkZeroing
      && !(UseTLAB && ZeroTLAB)        // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && alloc != nullptr
      && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) {
    assert(ac->is_alloc_tightly_coupled(), "sanity");
    // acopy to uninitialized tightly coupled allocations
    // needs zeroing outside the copy range
    // and the acopy itself will be to uninitialized memory
    acopy_to_uninitialized = true;
    if (alloc->maybe_set_complete(&_igvn)) {
      // "You break it, you buy it."
      InitializeNode* init = alloc->initialization();
      assert(init->is_complete(), "we just did this");
      init->set_complete_with_arraycopy();
      assert(dest->is_CheckCastPP(), "sanity");
      assert(dest->in(0)->in(0) == init, "dest pinned");
      adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
      // From this point on, every exit path is responsible for
      // initializing any non-copied parts of the object to zero.
      // Also, if this flag is set we make sure that arraycopy interacts properly
      // with G1, eliding pre-barriers.  See CR 6627983.
      dest_needs_zeroing = true;
      default_value = alloc->in(AllocateNode::DefaultValue);
      raw_default_value = alloc->in(AllocateNode::RawDefaultValue);
    } else {
      // dest_needs_zeroing = false;
    }
  } else {
    // No zeroing elimination needed here.
    alloc                  = nullptr;
    acopy_to_uninitialized = false;
    //original_dest        = dest;
    //dest_needs_zeroing   = false;
  }

  uint alias_idx = C->get_alias_index(adr_type);

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
  RegionNode* result_region = new RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new PhiNode(result_region, Type::MEMORY, adr_type);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
  transform_later(result_region);
  transform_later(result_i_o);
  transform_later(result_memory);

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = *io;
  Node* slow_mem = mem->memory_at(alias_idx);
  DEBUG_ONLY(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = nullptr;
  Node* checked_i_o     = nullptr;
  Node* checked_value   = nullptr;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_needs_zeroing, "");
    Node* cv = generate_generic_arraycopy(ctrl, &mem,
                                          adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, acopy_to_uninitialized);
    if (cv == nullptr)  cv = intcon(-1);  // failure (no stub available)
    checked_control = *ctrl;
    checked_i_o     = *io;
    checked_mem     = mem->memory_at(alias_idx);
    checked_value   = cv;
    *ctrl = top();
  }

  Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
  if (not_pos != nullptr) {
    Node* local_ctrl = not_pos, *local_io = *io;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // (6) length must not be negative.
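    // If the caller could not already prove the length non-negative, add a
    // guard here that routes negative lengths to the slow path.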
    if (!length_never_negative) {
      generate_negative_guard(&local_ctrl, copy_length, slow_region);
    }

    // copy_length is 0.
    if (dest_needs_zeroing) {
      assert(!local_ctrl->is_top(), "no ctrl?");
      if (copy_length->eqv_uncast(dest_length)
          || _igvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(local_ctrl, local_mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             intcon(0), nullptr,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
                                          Compile::AliasIdxRaw,
                                          top());
        transform_later(mb);
        mb->set_req(TypeFunc::Control, local_ctrl);
        mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
        local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
        local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));

        InitializeNode* init = mb->as_Initialize();
        init->set_complete(&_igvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
    result_region->init_req(zero_path, local_ctrl);
    result_i_o   ->init_req(zero_path, local_io);
    result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
  }

  if (!(*ctrl)->is_top() && dest_needs_zeroing) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
    Node* dest_tail   = transform_later(new AddINode(dest_offset, copy_length));

    // If there is a head section that needs zeroing, do it now.
    if (_igvn.find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(*ctrl, mem,
                           adr_type, dest,
                           default_value, raw_default_value,
                           basic_elem_type,
                           intcon(0), dest_offset,
                           nullptr);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = nullptr;
    if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt = transform_later(new CmpINode(dest_tail, dest_length));
      Node* bol_lt = transform_later(new BoolNode(cmp_lt, BoolTest::lt));
      tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr);
      assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
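      // generate_block_arraycopy only succeeds when the constant offsets let the
      // copy be done in whole jlong words; on success the regular fast path is skipped.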
      bool didit = false;
      {
        Node* local_ctrl = *ctrl, *local_io = *io;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
                                         adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, acopy_to_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, local_ctrl);
          result_i_o   ->init_req(bcopy_path, local_io);
          result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
        }
      }
      if (didit) {
        *ctrl = top();     // no regular fast path
      }
    }

    // Clear the tail, if any.
    if (tail_ctl != nullptr) {
      Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl;
      *ctrl = tail_ctl;
      if (notail_ctl == nullptr) {
        generate_clear_array(*ctrl, mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             dest_tail, nullptr,
                             dest_size);
      } else {
        // Make a local merge.
        Node* done_ctl = transform_later(new RegionNode(3));
        Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, mem->memory_at(alias_idx));
        generate_clear_array(*ctrl, mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             dest_tail, nullptr,
                             dest_size);
        done_ctl->init_req(2, *ctrl);
        done_mem->init_req(2, mem->memory_at(alias_idx));
        *ctrl = done_ctl;
        mem->set_memory_at(alias_idx, done_mem);
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // We don't need a subtype check for validated copies and Object[].clone()
    bool skip_subtype_check = ac->is_arraycopy_validated() || ac->is_copyof_validated() ||
                              ac->is_copyofrange_validated() || ac->is_clone_oop_array();
    if (!skip_subtype_check) {
      // Get the klass* for both src and dest
      Node* src_klass  = ac->in(ArrayCopyNode::SrcKlass);
      Node* dest_klass = ac->in(ArrayCopyNode::DestKlass);

      assert(src_klass != nullptr && dest_klass != nullptr, "should have klasses");

      // Generate the subtype check.
      // This might fold up statically, or then again it might not.
      //
      // Non-static example:  Copying List<String>.elements to a new String[].
      // The backing store for a List<String> is always an Object[],
      // but its elements are always type String, if the generic types
      // are correct at the source level.
      //
      // Test S[] against D[], not S against D, because (probably)
      // the secondary supertype cache is less busy for S[] than S.
      // This usually only matters when D is an interface.
      Node* not_subtype_ctrl = Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, _igvn);
      // Plug failing path into checked_oop_disjoint_arraycopy
      if (not_subtype_ctrl != top()) {
        Node* local_ctrl = not_subtype_ctrl;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        // (At this point we can assume disjoint_bases, since types differ.)
        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        Node* p1 = basic_plus_adr(dest_klass, ek_offset);
        Node* n1 = LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
        Node* dest_elem_klass = transform_later(n1);
        Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                                adr_type,
                                                dest_elem_klass,
                                                src, src_offset, dest, dest_offset,
                                                ConvI2X(copy_length), acopy_to_uninitialized);
        if (cv == nullptr)  cv = intcon(-1);  // failure (no stub available)
        checked_control = local_ctrl;
        checked_i_o     = *io;
        checked_mem     = local_mem->memory_at(alias_idx);
        checked_value   = cv;
      }
    }
    // At this point we know we do not need type checks on oop stores.

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (!bs->array_copy_requires_gc_barriers(alloc != nullptr, copy_type, false, false, BarrierSetC2::Expansion)) {
      // If we do not need gc barriers, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  bool is_partial_array_copy = false;
  if (!(*ctrl)->is_top()) {
    // Generate the fast path, if possible.
    Node* local_ctrl = *ctrl;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);
    is_partial_array_copy = generate_unchecked_arraycopy(&local_ctrl, &local_mem,
                                                         adr_type, copy_type, disjoint_bases,
                                                         src, src_offset, dest, dest_offset,
                                                         ConvI2X(copy_length), acopy_to_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, local_ctrl);
    result_i_o   ->init_req(fast_path, *io);
    result_memory->init_req(fast_path, local_mem->memory_at(alias_idx));
  }

  // Here are all the slow paths up to this point, in one bundle:
  assert(slow_region != nullptr, "allocated on entry");
  slow_control = slow_region;
  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);

  *ctrl = checked_control;
  if (!(*ctrl)->is_top()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
    Node* cmp = new CmpINode(checked_value, intcon(0));
    transform_later(cmp);
    Node* bol = new BoolNode(cmp, BoolTest::eq);
    transform_later(bol);
    IfNode* iff = new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN);
    transform_later(iff);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = new IfTrueNode(iff);
    transform_later(checks_done);
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
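    // slow_reg2 merges the guard failures collected in slow_region with the
    // partially-copied state coming back from the checked stub.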
    *ctrl = new IfFalseNode(iff);
    transform_later(*ctrl);
    RegionNode* slow_reg2 = new RegionNode(3);
    PhiNode*    slow_i_o2 = new PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new PhiNode(slow_reg2, Type::MEMORY, adr_type);
    transform_later(slow_reg2);
    transform_later(slow_i_o2);
    transform_later(slow_mem2);
    slow_reg2->init_req(1, slow_control);
    slow_i_o2->init_req(1, slow_i_o);
    slow_mem2->init_req(1, slow_mem);
    slow_reg2->init_req(2, *ctrl);
    slow_i_o2->init_req(2, checked_i_o);
    slow_mem2->init_req(2, checked_mem);

    slow_control = slow_reg2;
    slow_i_o     = slow_i_o2;
    slow_mem     = slow_mem2;

    if (alloc != nullptr) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
      Node* checked_offset = new XorINode(checked_value, intcon(-1));
      Node* slow_offset    = new PhiNode(slow_reg2, TypeInt::INT);
      transform_later(checked_offset);
      transform_later(slow_offset);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = new AddINode(src_offset,  slow_offset);
      transform_later(src_off_plus);
      Node* dest_off_plus = new AddINode(dest_offset, slow_offset);
      transform_later(dest_off_plus);
      Node* length_minus  = new SubINode(copy_length, slow_offset);
      transform_later(length_minus);

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }
  *ctrl = slow_control;
  if (!(*ctrl)->is_top()) {
    Node* local_ctrl = *ctrl, *local_io = slow_i_o;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // Generate the slow path, if needed.
    local_mem->set_memory_at(alias_idx, slow_mem);

    if (dest_needs_zeroing) {
      generate_clear_array(local_ctrl, local_mem,
                           adr_type, dest,
                           default_value, raw_default_value,
                           basic_elem_type,
                           intcon(0), nullptr,
                           alloc->in(AllocateNode::AllocSize));
    }

    local_mem = generate_slow_arraycopy(ac,
                                        &local_ctrl, local_mem, &local_io,
                                        adr_type,
                                        src, src_offset, dest, dest_offset,
                                        copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, local_ctrl);
    result_i_o   ->init_req(slow_call_path, local_io);
    result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
  } else {
    ShouldNotReachHere(); // no call to generate_slow_arraycopy:
                          // projections were not extracted
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == nullptr) {
      result_region->init_req(i, top());
    }
  }

  // Finished; return the combined state.
  *ctrl = result_region;
  *io   = result_i_o;
  mem->set_memory_at(alias_idx, result_memory);

  // mem no longer guaranteed to stay a MergeMemNode
  Node* out_mem = mem;
  DEBUG_ONLY(mem = nullptr);

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.
  // Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by arraycopy calls, this causes register spilling on the stack.
  // See 6544710. The next memory barrier is added to avoid it. If the
  // arraycopy can be optimized away (which it can, sometimes) then we can
  // manually remove the membar also.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != nullptr && !alloc->initialization()->does_not_escape()) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
  } else {
    insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder);
  }

  if (is_partial_array_copy) {
    assert((*ctrl)->is_Proj(), "MemBar control projection");
    assert((*ctrl)->in(0)->isa_MemBar(), "MemBar node");
    (*ctrl)->in(0)->isa_MemBar()->set_trailing_partial_array_copy();
  }

  _igvn.replace_node(_callprojs->fallthrough_memproj, out_mem);
  if (_callprojs->fallthrough_ioproj != nullptr) {
    _igvn.replace_node(_callprojs->fallthrough_ioproj, *io);
  }
  _igvn.replace_node(_callprojs->fallthrough_catchproj, *ctrl);

#ifdef ASSERT
  const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
  if (dest_t->is_known_instance() && !is_partial_array_copy) {
    ArrayCopyNode* ac = nullptr;
    assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
    assert(ac == nullptr, "no arraycopy anymore");
  }
#endif

  return out_mem;
}

// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type           memory slice where writes are generated
//   dest               oop of the destination array
//   basic_elem_type    element type of the destination
//   slice_idx          array index of first element to store
//   slice_len          number of elements to store (or null)
//   dest_size          total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-null.
// If dest_size is non-null, zeroing extends to the end of the object.
// If slice_len is non-null, the slice_idx value must be a constant.
void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
                                            const TypePtr* adr_type,
                                            Node* dest,
                                            Node* val,
                                            Node* raw_val,
                                            BasicType basic_elem_type,
                                            Node* slice_idx,
                                            Node* slice_len,
                                            Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != nullptr ? 1 : 0) + (dest_size != nullptr ?
          1 : 0) == 1, "");
  if (slice_len == nullptr)  slice_len = top();
  if (dest_size == nullptr)  dest_size = top();

  uint alias_idx = C->get_alias_index(adr_type);

  // operate on this memory slice:
  Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on

  // scaling and rounding of indexes:
  assert(basic_elem_type != T_PRIMITIVE_OBJECT, "should have been converted to a basic type copy");
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end_con, &_igvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end, &_igvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base  = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong - 1);
    Node*    end       = ConvI2X(slice_len);
    if (scale != 0)
      end = transform_later(new LShiftXNode(end, intcon(scale)));
    end_base += end_round;
    end = transform_later(new AddXNode(end, MakeConX(end_base)));
    end = transform_later(new AndXNode(end, MakeConX(~end_round)));
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end, &_igvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = transform_later(new LShiftXNode(start, intcon(scale)));
    start = transform_later(new AddXNode(start, MakeConX(abase)));
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
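      // If that jint store would fall inside the object header, drop the bump;
      // the array base must then already be jlong-aligned (asserted below).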
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = transform_later(new AddXNode(start, MakeConX(bump_bit)));
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = transform_later(new AndXNode(start, MakeConX(~to_clear)));
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)));
        Node* p1 = basic_plus_adr(dest, x1);
        if (val == nullptr) {
          assert(raw_val == nullptr, "val may not be null");
          mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
        } else {
          assert(_igvn.type(val)->isa_narrowoop(), "should be narrow oop");
          mem = new StoreNNode(ctrl, mem, p1, adr_type, val, MemNode::unordered);
        }
        mem = transform_later(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, raw_val,
                                       start, end, &_igvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  merge_mem->set_memory_at(alias_idx, mem);
}

bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
                                                const TypePtr* adr_type,
                                                BasicType basic_elem_type,
                                                AllocateNode* alloc,
                                                Node* src,  Node* src_offset,
                                                Node* dest, Node* dest_offset,
                                                Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false; // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) _igvn.find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) _igvn.find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0) {
    // At present, we can only understand constants.
    return false;
  }

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
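    // Both offsets must be misaligned by exactly one jint, and misaligned in the
    // same way; a single jint load/store then brings them up to a jlong boundary.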
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src,  src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      const TypePtr* s_adr_type = _igvn.type(sptr)->is_ptr();
      assert(s_adr_type->isa_aryptr(), "impossible slice");
      uint s_alias_idx = C->get_alias_index(s_adr_type);
      uint d_alias_idx = C->get_alias_index(adr_type);
      bool is_mismatched = (basic_elem_type != T_INT);
      Node* sval = transform_later(
          LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
                         TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
                         false /*require_atomic_access*/, false /*unaligned*/, is_mismatched));
      Node* st = transform_later(
          StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(d_alias_idx), dptr, adr_type,
                          sval, T_INT, MemNode::unordered));
      if (is_mismatched) {
        st->as_Store()->set_mismatched_access();
      }
      (*mem)->set_memory_at(d_alias_idx, st);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");

  // Do this copy by giant steps.
  Node* sptr   = basic_plus_adr(src,  src_off);
  Node* dptr   = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = transform_later(new SubXNode(countx, MakeConX(dest_off)));
  countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong)));

  bool disjoint_bases = true;   // since alloc isn't null
  generate_unchecked_arraycopy(ctrl, mem,
                               adr_type, T_LONG, disjoint_bases,
                               sptr, nullptr, dptr, nullptr, countx, dest_uninitialized);

  return true;
}

// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
                                                        Node** ctrl, Node* mem, Node** io,
                                                        const TypePtr* adr_type,
                                                        Node* src,  Node* src_offset,
                                                        Node* dest, Node* dest_offset,
                                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");

  const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
  CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
                                          "slow_arraycopy", TypePtr::BOTTOM);

  call->init_req(TypeFunc::Control, *ctrl);
  call->init_req(TypeFunc::I_O    , *io);
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());
  call->init_req(TypeFunc::Parms+0, src);
  call->init_req(TypeFunc::Parms+1, src_offset);
  call->init_req(TypeFunc::Parms+2, dest);
  call->init_req(TypeFunc::Parms+3, dest_offset);
  call->init_req(TypeFunc::Parms+4, copy_length);
  call->copy_call_debug_info(&_igvn, ac);

  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
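  // Replace the ArrayCopyNode with the runtime call and re-extract its
  // projections so the caller can rewire control, memory and I/O.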
  _igvn.replace_node(ac, call);
  transform_later(call);

  _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
  *ctrl = _callprojs->fallthrough_catchproj->clone();
  transform_later(*ctrl);

  Node* m = _callprojs->fallthrough_memproj->clone();
  transform_later(m);

  uint alias_idx = C->get_alias_index(adr_type);
  MergeMemNode* out_mem;
  if (alias_idx != Compile::AliasIdxBot) {
    out_mem = MergeMemNode::make(mem);
    out_mem->set_memory_at(alias_idx, m);
  } else {
    out_mem = MergeMemNode::make(m);
  }
  transform_later(out_mem);

  // When src is negative and arraycopy is before an infinite loop, _callprojs->fallthrough_ioproj
  // could be nullptr. Skip clone and update nullptr fallthrough_ioproj.
  if (_callprojs->fallthrough_ioproj != nullptr) {
    *io = _callprojs->fallthrough_ioproj->clone();
    transform_later(*io);
  } else {
    *io = nullptr;
  }

  return out_mem;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                     const TypePtr* adr_type,
                                                     Node* dest_elem_klass,
                                                     Node* src,  Node* src_offset,
                                                     Node* dest, Node* dest_offset,
                                                     Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top())  return nullptr;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
    return nullptr;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
  Node* check_offset = ConvI2X(transform_later(n3));
  Node* check_value  = dest_elem_klass;

  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type,
                              src_start, dest_start, copy_length XTOP, check_offset XTOP, check_value);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                   const TypePtr* adr_type,
                                                   Node* src,  Node* src_offset,
                                                   Node* dest, Node* dest_offset,
                                                   Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return nullptr;
  assert(!dest_uninitialized, "Invariant");

  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
    return nullptr;
  }

  const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "generic_arraycopy", adr_type,
                              src, src_offset, dest, dest_offset, copy_length);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                    const TypePtr* adr_type,
                                                    BasicType basic_elem_type,
                                                    bool disjoint_bases,
                                                    Node* src,  Node* src_offset,
                                                    Node* dest, Node* dest_offset,
                                                    Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return false;

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != nullptr || dest_offset != nullptr) {
    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                          disjoint_bases, copyfunc_name, dest_uninitialized);

  Node* result_memory = nullptr;
  RegionNode* exit_block = nullptr;
  if (ArrayOperationPartialInlineSize > 0 && is_subword_type(basic_elem_type) &&
      Matcher::vector_width_in_bytes(basic_elem_type) >= 16) {
    generate_partial_inlining_block(ctrl, mem, adr_type, &exit_block, &result_memory,
                                    copy_length, src_start, dest_start, basic_elem_type);
  }

  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name, adr_type,
                              src_start, dest_start, copy_length XTOP);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  // Connecting remaining edges for exit_block coming from stub_block.
  if (exit_block) {
    exit_block->init_req(2, *ctrl);

    // Memory edge corresponding to stub_region.
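    // (exit_block and result_memory were pre-wired with the masked-vector path
    // in generate_partial_inlining_block; the stub path is added here.)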
    result_memory->init_req(2, *mem);

    uint alias_idx = C->get_alias_index(adr_type);
    if (alias_idx != Compile::AliasIdxBot) {
      *mem = MergeMemNode::make(*mem);
      (*mem)->set_memory_at(alias_idx, result_memory);
    } else {
      *mem = MergeMemNode::make(result_memory);
    }
    transform_later(*mem);
    *ctrl = exit_block;
    return true;
  }
  return false;
}

const TypePtr* PhaseMacroExpand::adjust_for_flat_array(const TypeAryPtr* top_dest, Node*& src_offset,
                                                       Node*& dest_offset, Node*& length, BasicType& dest_elem,
                                                       Node*& dest_length) {
#ifdef ASSERT
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bool needs_barriers = top_dest->elem()->inline_klass()->contains_oops() &&
                        bs->array_copy_requires_gc_barriers(dest_length != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization);
  assert(!needs_barriers || StressReflectiveCode, "Flat arraycopy would require GC barriers");
#endif
  int elem_size = top_dest->flat_elem_size();
  if (elem_size >= 8) {
    if (elem_size > 8) {
      // treat as array of long but scale length, src offset and dest offset
      assert((elem_size % 8) == 0, "not a power of 2?");
      int factor = elem_size / 8;
      length = transform_later(new MulINode(length, intcon(factor)));
      src_offset = transform_later(new MulINode(src_offset, intcon(factor)));
      dest_offset = transform_later(new MulINode(dest_offset, intcon(factor)));
      if (dest_length != nullptr) {
        dest_length = transform_later(new MulINode(dest_length, intcon(factor)));
      }
      elem_size = 8;
    }
    dest_elem = T_LONG;
  } else if (elem_size == 4) {
    dest_elem = T_INT;
  } else if (elem_size == 2) {
    dest_elem = T_CHAR;
  } else if (elem_size == 1) {
    dest_elem = T_BYTE;
  } else {
    ShouldNotReachHere();
  }
  return TypeRawPtr::BOTTOM;
}

#undef XTOP

void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* io = ac->in(TypeFunc::I_O);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  MergeMemNode* merge_mem = nullptr;

  if (ac->is_clonebasic()) {
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bs->clone_at_expansion(this, ac);
    return;
  } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_clone_oop_array()) {
    const Type* src_type = _igvn.type(src);
    const Type* dest_type = _igvn.type(dest);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    const TypeAryPtr* top_dest = dest_type->isa_aryptr();
    BasicType dest_elem = T_OBJECT;
    if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
      dest_elem = top_dest->elem()->array_element_basic_type();
    }
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (top_src != nullptr && top_src->is_flat()) {
      // If src is flat, dest is guaranteed to be flat as well
      top_dest = top_src;
    }

    AllocateArrayNode* alloc = nullptr;
    Node* dest_length = nullptr;
    if (ac->is_alloc_tightly_coupled()) {
      alloc = AllocateArrayNode::Ideal_array_allocation(dest);
      assert(alloc != nullptr, "expect alloc");
      dest_length = alloc->in(AllocateNode::ALength);
    }

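    // Determine the address type for the copy's memory effects; a flat
    // destination is widened to raw memory by adjust_for_flat_array below.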
    Node* mem = ac->in(TypeFunc::Memory);
    const TypePtr* adr_type = nullptr;
    if (top_dest->is_flat()) {
      assert(dest_length != nullptr || StressReflectiveCode, "must be tightly coupled");
      // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
      // on all slices to prevent writes into the source from floating below the arraycopy.
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
      adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
    } else {
      adr_type = dest_type->is_oopptr()->add_offset(Type::OffsetBot);
      if (ac->_dest_type != TypeOopPtr::BOTTOM) {
        adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
      }
      if (ac->_src_type != ac->_dest_type) {
        adr_type = TypeRawPtr::BOTTOM;
      }
    }
    merge_mem = MergeMemNode::make(mem);
    transform_later(merge_mem);

    generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                       adr_type, dest_elem,
                       src, src_offset, dest, dest_offset, length,
                       dest_length,
                       true, !ac->is_copyofrange());
    return;
  }

  AllocateArrayNode* alloc = nullptr;
  if (ac->is_alloc_tightly_coupled()) {
    alloc = AllocateArrayNode::Ideal_array_allocation(dest);
    assert(alloc != nullptr, "expect alloc");
  }

  assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");

  // Compile time checks.  If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call.  Instead, we let the call remain as it
  // is.  The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_igvn);
  const Type* dest_type = dest->Value(&_igvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  BasicType src_elem = T_CONFLICT;
  BasicType dest_elem = T_CONFLICT;

  if (top_src != nullptr && top_src->elem() != Type::BOTTOM) {
    src_elem = top_src->elem()->array_element_basic_type();
  }
  if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
    dest_elem = top_dest->elem()->array_element_basic_type();
  }
  if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
  if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

  if (ac->is_arraycopy_validated() && dest_elem != T_CONFLICT && src_elem == T_CONFLICT) {
    src_elem = dest_elem;
  }

  if (src_elem == T_CONFLICT || dest_elem == T_CONFLICT) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    {
      Node* mem = ac->in(TypeFunc::Memory);
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);

      merge_mem = MergeMemNode::make(mem);
      transform_later(merge_mem);
    }

    // Call StubRoutines::generic_arraycopy stub.
    Node* mem = generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io,
                                   TypeRawPtr::BOTTOM, T_CONFLICT,
                                   src, src_offset, dest, dest_offset, length,
                                   nullptr,
                                   // If a negative length guard was generated for the ArrayCopyNode,
                                   // the length of the array can never be negative.
                                   false, ac->has_negative_length_guard());
    return;
  }

  assert(!ac->is_arraycopy_validated() || (src_elem == dest_elem && dest_elem != T_VOID), "validated but different basic types");

  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  //
  // We have no stub to copy flat inline type arrays with oop
  // fields if we need to emit write barriers.
  //
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  if (src_elem != dest_elem || top_src->is_flat() != top_dest->is_flat() || dest_elem == T_VOID ||
      (top_src->is_flat() && top_dest->elem()->inline_klass()->contains_oops() &&
       bs->array_copy_requires_gc_barriers(alloc != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization))) {
    // The component types are not the same or are not recognized. Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    {
      Node* mem = ac->in(TypeFunc::Memory);
      merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
    }

    _igvn.replace_node(_callprojs->fallthrough_memproj, merge_mem);
    if (_callprojs->fallthrough_ioproj != nullptr) {
      _igvn.replace_node(_callprojs->fallthrough_ioproj, io);
    }
    _igvn.replace_node(_callprojs->fallthrough_catchproj, ctrl);
    return;
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  Node* mem = ac->in(TypeFunc::Memory);
  if (top_dest->is_flat()) {
    // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
    // on all slices to prevent writes into the source from floating below the arraycopy.
    insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
  }
  merge_mem = MergeMemNode::make(mem);
  transform_later(merge_mem);

  RegionNode* slow_region = new RegionNode(1);
  transform_later(slow_region);

  if (!ac->is_arraycopy_validated()) {
    // (3) operands must not be null
    // We currently perform our null checks with the null_check routine.
    // This means that the null exceptions will be reported in the caller
    // rather than (correctly) reported inside of the native arraycopy call.
    // This should be corrected, given time. We do our null check with the
    // stack pointer restored.
    // Null checks are done in library_call.cpp.

    // (4) src_offset must not be negative.
    generate_negative_guard(&ctrl, src_offset, slow_region);

    // (5) dest_offset must not be negative.
    generate_negative_guard(&ctrl, dest_offset, slow_region);

    // (6) length must not be negative (moved to generate_arraycopy()).
    // generate_negative_guard(length, slow_region);

    // (7) src_offset + length must not exceed length of src.
    Node* alen = ac->in(ArrayCopyNode::SrcLen);
    assert(alen != nullptr, "need src len");
    generate_limit_guard(&ctrl,
                         src_offset, length,
                         alen,
                         slow_region);

    // (8) dest_offset + length must not exceed length of dest.
    alen = ac->in(ArrayCopyNode::DestLen);
    assert(alen != nullptr, "need dest len");
    generate_limit_guard(&ctrl,
                         dest_offset, length,
                         alen,
                         slow_region);

    // (9) each element of an oop array must be assignable
    // The generate_arraycopy subroutine checks this.

    // Handle inline type arrays
    if (!top_src->is_flat()) {
      if (UseFlatArray && !top_src->is_not_flat()) {
        // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
        generate_flat_array_guard(&ctrl, src, slow_region);
      }
      if (EnableValhalla) {
        // No validation. The subtype check emitted at macro expansion time will not go to the slow
        // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
        generate_null_free_array_guard(&ctrl, dest, slow_region);
      }
    } else {
      assert(top_dest->is_flat(), "dest array must be flat");
    }
  }

  // This is where the memory effects are placed:
  const TypePtr* adr_type = nullptr;
  Node* dest_length = (alloc != nullptr) ? alloc->in(AllocateNode::ALength) : nullptr;

  if (top_dest->is_flat()) {
    adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
  } else if (ac->_dest_type != TypeOopPtr::BOTTOM) {
    adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
  } else {
    adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  }

  generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                     adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     dest_length,
                     // If a negative length guard was generated for the ArrayCopyNode,
                     // the length of the array can never be negative.
                     false, ac->has_negative_length_guard(),
                     slow_region);
}