/*
 * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/convertnode.hpp"
#include "opto/vectornode.hpp"
#include "opto/graphKit.hpp"
#include "opto/macro.hpp"
#include "opto/runtime.hpp"
#include "opto/castnode.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

void PhaseMacroExpand::insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, *ctrl);
  mb->init_req(TypeFunc::Memory, *mem);
  transform_later(mb);
  *ctrl = new ProjNode(mb, TypeFunc::Control);
  transform_later(*ctrl);
  Node* mem_proj = new ProjNode(mb, TypeFunc::Memory);
  transform_later(mem_proj);
  *mem = mem_proj;
}

Node* PhaseMacroExpand::array_element_address(Node* ary, Node* idx, BasicType elembt) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);
  Node* base = basic_plus_adr(ary, header);
#ifdef _LP64
  // see comment in GraphKit::array_element_address
  int index_max = max_jint - 1;  // array size is max_jint, index is one less
  const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
  idx = transform_later(new ConvI2LNode(idx, lidxtype));
#endif
  Node* scale = new LShiftXNode(idx, intcon(shift));
  transform_later(scale);
  return basic_plus_adr(ary, base, scale);
}

Node* PhaseMacroExpand::ConvI2L(Node* offset) {
  return transform_later(new ConvI2LNode(offset));
}

Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
                                       const TypeFunc* call_type, address call_addr,
                                       const char* call_name,
                                       const TypePtr* adr_type,
                                       Node* parm0, Node* parm1,
                                       Node* parm2, Node* parm3,
                                       Node* parm4, Node* parm5,
                                       Node* parm6, Node* parm7) {
  Node* call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O    , top());
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());

  // Hook each parm in order.  Stop looking at the first null.
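  // The nested ifs below attach parameters contiguously starting at
  // TypeFunc::Parms: the first null parameter terminates the chain, and the
  // assert afterwards checks that the last requested input was filled in.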
  if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1);
  if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2);
  if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3);
  if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4);
  if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5);
  if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6);
  if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7);
    /* close each nested if ===> */  } } } } } } } }
  assert(call->in(call->req()-1) != nullptr, "must initialize all parms");

  return call;
}


//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or null if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not null, the slow edge
// is appended to the region.
Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) {
  if ((*ctrl)->is_top()) {
    // Already short circuited.
    return nullptr;
  }
  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_igvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return nullptr;
  }

  IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN);
  transform_later(iff);

  Node* if_slow = new IfTrueNode(iff);
  transform_later(if_slow);

  if (region != nullptr) {
    region->add_req(if_slow);
  }

  Node* if_fast = new IfFalseNode(iff);
  transform_later(if_fast);

  *ctrl = if_fast;

  return if_slow;
}

Node* PhaseMacroExpand::generate_slow_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_UNLIKELY_MAG(3));
}

inline Node* PhaseMacroExpand::generate_fair_guard(Node** ctrl, Node* test, RegionNode* region) {
  return generate_guard(ctrl, test, region, PROB_FAIR);
}

void PhaseMacroExpand::generate_negative_guard(Node** ctrl, Node* index, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  if (_igvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return;                // index is already adequately typed
  Node* cmp_lt = new CmpINode(index, intcon(0));
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* subseq_length, Node* array_length, RegionNode* region) {
  if ((*ctrl)->is_top())
    return;                // already stopped
  bool zero_offset = _igvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset) {      // last += offset
    last = new AddINode(last, offset);
    transform_later(last);
  }
  Node* cmp_lt = new CmpUNode(array_length, last);
  transform_later(cmp_lt);
  Node* bol_lt = new BoolNode(cmp_lt, BoolTest::lt);
  transform_later(bol_lt);
  generate_guard(ctrl, bol_lt, region, PROB_MIN);
}

//
// Partial in-lining handling for smaller conjoint/disjoint array copies having
// length (in bytes) less than ArrayOperationPartialInlineSize.
//  if (length <= ArrayOperationPartialInlineSize) {
//    partial_inlining_block:
//      mask = Mask_Gen
//      vload = LoadVectorMasked src , mask
//      StoreVectorMasked dst, mask, vload
//  } else {
//    stub_block:
//      callstub array_copy
//  }
//  exit_block:
//    Phi = label partial_inlining_block:mem , label stub_block:mem (filled by caller)
//    mem = MergeMem (Phi)
//    control = stub_block
//
// Exit_block and associated phi(memory) are partially initialized for partial_in-lining_block
// edges. Remaining edges for exit_block coming from stub_block are connected by the caller
// post stub nodes creation.
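//
// Note: the inline path below is built from a single masked vector load/store
// pair (no copy loop); at runtime the stub call is only reached when the copy
// is larger than ArrayOperationPartialInlineSize bytes.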
//

void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type,
                                                       RegionNode** exit_block, Node** result_memory, Node* length,
                                                       Node* src_start, Node* dst_start, BasicType type) {
  const TypePtr* src_adr_type = _igvn.type(src_start)->isa_ptr();
  Node* inline_block = nullptr;
  Node* stub_block = nullptr;

  int const_len = -1;
  const TypeInt* lty = nullptr;
  uint shift = exact_log2(type2aelembytes(type));
  if (length->Opcode() == Op_ConvI2L) {
    lty = _igvn.type(length->in(1))->isa_int();
  } else {
    lty = _igvn.type(length)->isa_int();
  }
  if (lty && lty->is_con()) {
    const_len = lty->get_con() << shift;
  }

  // Return if copy length is greater than partial inline size limit or
  // target does not support masked load/stores.
  int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, const_len);
  if (const_len > ArrayOperationPartialInlineSize ||
      !Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type)  ||
      !Matcher::match_rule_supported_vector(Op_StoreVectorMasked, lane_count, type) ||
      !Matcher::match_rule_supported_vector(Op_VectorMaskGen, lane_count, type)) {
    return;
  }

  int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
  Node* casted_length = new CastLLNode(*ctrl, length, TypeLong::make(0, inline_limit, Type::WidenMin));
  transform_later(casted_length);
  Node* copy_bytes = new LShiftXNode(length, intcon(shift));
  transform_later(copy_bytes);

  Node* cmp_le = new CmpULNode(copy_bytes, longcon(ArrayOperationPartialInlineSize));
  transform_later(cmp_le);
  Node* bol_le = new BoolNode(cmp_le, BoolTest::le);
  transform_later(bol_le);
  inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
  stub_block = *ctrl;

  Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
  transform_later(mask_gen);

  unsigned vec_size = lane_count * type2aelembytes(type);
  if (C->max_vector_size() < vec_size) {
    C->set_max_vector_size(vec_size);
  }

  const TypeVect* vt = TypeVect::make(type, lane_count);
  Node* mm = (*mem)->memory_at(C->get_alias_index(src_adr_type));
  Node* masked_load = new LoadVectorMaskedNode(inline_block, mm, src_start,
                                               src_adr_type, vt, mask_gen);
  transform_later(masked_load);

  mm = (*mem)->memory_at(C->get_alias_index(adr_type));
  Node* masked_store = new StoreVectorMaskedNode(inline_block, mm, dst_start,
                                                 masked_load, adr_type, mask_gen);
  transform_later(masked_store);

  // Convergence region for inline_block and stub_block.
  *exit_block = new RegionNode(3);
  transform_later(*exit_block);
  (*exit_block)->init_req(1, inline_block);
  *result_memory = new PhiNode(*exit_block, Type::MEMORY, adr_type);
  transform_later(*result_memory);
  (*result_memory)->init_req(1, masked_store);

  *ctrl = stub_block;
}


Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) {
  if ((*ctrl)->is_top())  return nullptr;

  if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return nullptr;                // index is already adequately typed
  Node* cmp_le = new CmpINode(index, intcon(0));
  transform_later(cmp_le);
  BoolTest::mask le_or_eq = (never_negative ?
                             BoolTest::eq : BoolTest::le);
  Node* bol_le = new BoolNode(cmp_le, le_or_eq);
  transform_later(bol_le);
  Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN);

  return is_notp;
}

Node* PhaseMacroExpand::mark_word_test(Node** ctrl, Node* obj, MergeMemNode* mem, uintptr_t mask_val, RegionNode* region) {
  // Load markword and check if obj is locked
  Node* mark = make_load(nullptr, mem->memory_at(Compile::AliasIdxRaw), obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
  Node* locked_bit = MakeConX(markWord::unlocked_value);
  locked_bit = transform_later(new AndXNode(locked_bit, mark));
  Node* cmp = transform_later(new CmpXNode(locked_bit, MakeConX(0)));
  Node* is_unlocked = transform_later(new BoolNode(cmp, BoolTest::ne));
  IfNode* iff = transform_later(new IfNode(*ctrl, is_unlocked, PROB_MAX, COUNT_UNKNOWN))->as_If();
  Node* locked_region = transform_later(new RegionNode(3));
  Node* mark_phi = transform_later(new PhiNode(locked_region, TypeX_X));

  // Unlocked: Use bits from mark word
  locked_region->init_req(1, transform_later(new IfTrueNode(iff)));
  mark_phi->init_req(1, mark);

  // Locked: Load prototype header from klass
  *ctrl = transform_later(new IfFalseNode(iff));
  // Make loads control dependent to make sure they are only executed if array is locked
  Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  Node* klass = transform_later(LoadKlassNode::make(_igvn, *ctrl, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
  Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
  Node* proto = transform_later(LoadNode::make(_igvn, *ctrl, C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));

  locked_region->init_req(2, *ctrl);
  mark_phi->init_req(2, proto);
  *ctrl = locked_region;

  // Now check if mark word bits are set
  Node* mask = MakeConX(mask_val);
  Node* masked = transform_later(new AndXNode(mark_phi, mask));
  cmp = transform_later(new CmpXNode(masked, mask));
  Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
  return generate_fair_guard(ctrl, bol, region);
}

Node* PhaseMacroExpand::generate_flat_array_guard(Node** ctrl, Node* array, MergeMemNode* mem, RegionNode* region) {
  return mark_word_test(ctrl, array, mem, markWord::flat_array_bit_in_place, region);
}

Node* PhaseMacroExpand::generate_null_free_array_guard(Node** ctrl, Node* array, MergeMemNode* mem, RegionNode* region) {
  return mark_word_test(ctrl, array, mem, markWord::null_free_array_bit_in_place, region);
}

void PhaseMacroExpand::finish_arraycopy_call(Node* call, Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type) {
  transform_later(call);

  *ctrl = new ProjNode(call, TypeFunc::Control);
  transform_later(*ctrl);
  Node* newmem = new ProjNode(call, TypeFunc::Memory);
  transform_later(newmem);

  uint alias_idx = C->get_alias_index(adr_type);
  if (alias_idx != Compile::AliasIdxBot) {
    *mem = MergeMemNode::make(*mem);
    (*mem)->set_memory_at(alias_idx, newmem);
  } else {
    *mem = MergeMemNode::make(newmem);
  }
  transform_later(*mem);
}

address PhaseMacroExpand::basictype2arraycopy(BasicType t,
                                              Node* src_offset,
                                              Node* dest_offset,
                                              bool disjoint_bases,
                                              const char* &name,
                                              bool dest_uninitialized) {
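  // If both offsets are constants we can tell whether the first copied element
  // of each array is HeapWord aligned and whether the two regions may be treated
  // as disjoint; otherwise fall back to the conservative conjoint stub.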
  const TypeInt* src_offset_inttype  = _igvn.find_int_type(src_offset);
  const TypeInt* dest_offset_inttype = _igvn.find_int_type(dest_offset);

  bool aligned = false;
  bool disjoint = disjoint_bases;

  // if the offsets are the same, we can treat the memory regions as
  // disjoint, because either the memory regions are in different arrays,
  // or they are identical (which we can treat as disjoint.)  We can also
  // treat a copy with a destination index less than the source index
  // as disjoint since a low->high copy will work correctly in this case.
  if (src_offset_inttype != nullptr && src_offset_inttype->is_con() &&
      dest_offset_inttype != nullptr && dest_offset_inttype->is_con()) {
    // both indices are constants
    int s_offs = src_offset_inttype->get_con();
    int d_offs = dest_offset_inttype->get_con();
    int element_size = type2aelembytes(t);
    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
              ((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0);
    if (s_offs >= d_offs)  disjoint = true;
  } else if (src_offset == dest_offset && src_offset != nullptr) {
    // This can occur if the offsets are identical non-constants.
    disjoint = true;
  }

  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
}

#define XTOP LP64_ONLY(COMMA top())

// Generate an optimized call to arraycopy.
// Caller must guard against non-arrays.
// Caller must determine a common array basic-type for both arrays.
// Caller must validate offsets against array bounds.
// The slow_region has already collected guard failure paths
// (such as out of bounds length or non-conformable array types).
// The generated code has this shape, in general:
//
//     if (length == 0)  return   // via zero_path
//     slowval = -1
//     if (types unknown) {
//       slowval = call generic copy loop
//       if (slowval == 0)  return  // via checked_path
//     } else if (indexes in bounds) {
//       if ((is object array) && !(array type check)) {
//         slowval = call checked copy loop
//         if (slowval == 0)  return  // via checked_path
//       } else {
//         call bulk copy loop
//         return  // via fast_path
//       }
//     }
//     // adjust params for remaining work:
//     if (slowval != -1) {
//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
//     }
//   slow_region:
//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
//     return  // via slow_call_path
//
// This routine is used from several intrinsics:  System.arraycopy,
// Object.clone (the array subcase), and Arrays.copyOf[Range].
//
Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* alloc,
                                           Node** ctrl, MergeMemNode* mem, Node** io,
                                           const TypePtr* adr_type,
                                           BasicType basic_elem_type,
                                           Node* src,  Node* src_offset,
                                           Node* dest, Node* dest_offset,
                                           Node* copy_length,
                                           Node* dest_length,
                                           bool disjoint_bases,
                                           bool length_never_negative,
                                           RegionNode* slow_region) {
  if (slow_region == nullptr) {
    slow_region = new RegionNode(1);
    transform_later(slow_region);
  }

  Node* original_dest = dest;
  bool  dest_needs_zeroing = false;
  bool  acopy_to_uninitialized = false;
  Node* default_value = nullptr;
  Node* raw_default_value = nullptr;

  // See if this is the initialization of a newly-allocated array.
  // If so, we will take responsibility here for initializing it to zero.
  // (Note:  Because tightly_coupled_allocation performs checks on the
  // out-edges of the dest, we need to avoid making derived pointers
  // from it until we have checked its uses.)
  if (ReduceBulkZeroing
      && !(UseTLAB && ZeroTLAB) // pointless if already zeroed
      && basic_elem_type != T_CONFLICT // avoid corner case
      && !src->eqv_uncast(dest)
      && alloc != nullptr
      && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) {
    assert(ac->is_alloc_tightly_coupled(), "sanity");
    // acopy to uninitialized tightly coupled allocations
    // needs zeroing outside the copy range
    // and the acopy itself will be to uninitialized memory
    acopy_to_uninitialized = true;
    if (alloc->maybe_set_complete(&_igvn)) {
      // "You break it, you buy it."
      InitializeNode* init = alloc->initialization();
      assert(init->is_complete(), "we just did this");
      init->set_complete_with_arraycopy();
      assert(dest->is_CheckCastPP(), "sanity");
      assert(dest->in(0)->in(0) == init, "dest pinned");
      adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
      // From this point on, every exit path is responsible for
      // initializing any non-copied parts of the object to zero.
      // Also, if this flag is set we make sure that arraycopy interacts properly
      // with G1, eliding pre-barriers. See CR 6627983.
      dest_needs_zeroing = true;
      default_value = alloc->in(AllocateNode::DefaultValue);
      raw_default_value = alloc->in(AllocateNode::RawDefaultValue);
    } else {
      // dest_needs_zeroing = false;
    }
  } else {
    // No zeroing elimination needed here.
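    // Drop the allocation: the destination is treated as ordinary,
    // already-initialized storage from here on, so no zeroing is elided.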
    alloc = nullptr;
    acopy_to_uninitialized = false;
    //original_dest = dest;
    //dest_needs_zeroing = false;
  }

  uint alias_idx = C->get_alias_index(adr_type);

  // Results are placed here:
  enum { fast_path        = 1,  // normal void-returning assembly stub
         checked_path     = 2,  // special assembly stub with cleanup
         slow_call_path   = 3,  // something went wrong; call the VM
         zero_path        = 4,  // bypass when length of copy is zero
         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
         PATH_LIMIT       = 6
  };
  RegionNode* result_region = new RegionNode(PATH_LIMIT);
  PhiNode*    result_i_o    = new PhiNode(result_region, Type::ABIO);
  PhiNode*    result_memory = new PhiNode(result_region, Type::MEMORY, adr_type);
  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
  transform_later(result_region);
  transform_later(result_i_o);
  transform_later(result_memory);

  // The slow_control path:
  Node* slow_control;
  Node* slow_i_o = *io;
  Node* slow_mem = mem->memory_at(alias_idx);
  DEBUG_ONLY(slow_control = (Node*) badAddress);

  // Checked control path:
  Node* checked_control = top();
  Node* checked_mem     = nullptr;
  Node* checked_i_o     = nullptr;
  Node* checked_value   = nullptr;

  if (basic_elem_type == T_CONFLICT) {
    assert(!dest_needs_zeroing, "");
    Node* cv = generate_generic_arraycopy(ctrl, &mem,
                                          adr_type,
                                          src, src_offset, dest, dest_offset,
                                          copy_length, acopy_to_uninitialized);
    if (cv == nullptr)  cv = intcon(-1);  // failure (no stub available)
    checked_control = *ctrl;
    checked_i_o     = *io;
    checked_mem     = mem->memory_at(alias_idx);
    checked_value   = cv;
    *ctrl = top();
  }

  Node* not_pos = generate_nonpositive_guard(ctrl, copy_length, length_never_negative);
  if (not_pos != nullptr) {
    Node* local_ctrl = not_pos, *local_io = *io;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // (6) length must not be negative.
    if (!length_never_negative) {
      generate_negative_guard(&local_ctrl, copy_length, slow_region);
    }

    // copy_length is 0.
    if (dest_needs_zeroing) {
      assert(!local_ctrl->is_top(), "no ctrl?");
      if (copy_length->eqv_uncast(dest_length)
          || _igvn.find_int_con(dest_length, 1) <= 0) {
        // There is no zeroing to do. No need for a secondary raw memory barrier.
      } else {
        // Clear the whole thing since there are no source elements to copy.
        generate_clear_array(local_ctrl, local_mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             intcon(0), nullptr,
                             alloc->in(AllocateNode::AllocSize));
        // Use a secondary InitializeNode as raw memory barrier.
        // Currently it is needed only on this path since other
        // paths have stub or runtime calls as raw memory barriers.
        MemBarNode* mb = MemBarNode::make(C, Op_Initialize,
                                          Compile::AliasIdxRaw,
                                          top());
        transform_later(mb);
        mb->set_req(TypeFunc::Control, local_ctrl);
        mb->set_req(TypeFunc::Memory, local_mem->memory_at(Compile::AliasIdxRaw));
        local_ctrl = transform_later(new ProjNode(mb, TypeFunc::Control));
        local_mem->set_memory_at(Compile::AliasIdxRaw, transform_later(new ProjNode(mb, TypeFunc::Memory)));

        InitializeNode* init = mb->as_Initialize();
        init->set_complete(&_igvn);  // (there is no corresponding AllocateNode)
      }
    }

    // Present the results of the fast call.
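    // (This is the zero_path: no elements move; only any zeroing above was done.)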
    result_region->init_req(zero_path, local_ctrl);
    result_i_o   ->init_req(zero_path, local_io);
    result_memory->init_req(zero_path, local_mem->memory_at(alias_idx));
  }

  if (!(*ctrl)->is_top() && dest_needs_zeroing) {
    // We have to initialize the *uncopied* part of the array to zero.
    // The copy destination is the slice dest[off..off+len].  The other slices
    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
    Node* dest_size = alloc->in(AllocateNode::AllocSize);
    Node* dest_tail = transform_later( new AddINode(dest_offset, copy_length));

    // If there is a head section that needs zeroing, do it now.
    if (_igvn.find_int_con(dest_offset, -1) != 0) {
      generate_clear_array(*ctrl, mem,
                           adr_type, dest,
                           default_value, raw_default_value,
                           basic_elem_type,
                           intcon(0), dest_offset,
                           nullptr);
    }

    // Next, perform a dynamic check on the tail length.
    // It is often zero, and we can win big if we prove this.
    // There are two wins:  Avoid generating the ClearArray
    // with its attendant messy index arithmetic, and upgrade
    // the copy to a more hardware-friendly word size of 64 bits.
    Node* tail_ctl = nullptr;
    if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) {
      Node* cmp_lt = transform_later( new CmpINode(dest_tail, dest_length) );
      Node* bol_lt = transform_later( new BoolNode(cmp_lt, BoolTest::lt) );
      tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr);
      assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome");
    }

    // At this point, let's assume there is no tail.
    if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) {
      // There is no tail.  Try an upgrade to a 64-bit copy.
      bool didit = false;
      {
        Node* local_ctrl = *ctrl, *local_io = *io;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        didit = generate_block_arraycopy(&local_ctrl, &local_mem, local_io,
                                         adr_type, basic_elem_type, alloc,
                                         src, src_offset, dest, dest_offset,
                                         dest_size, acopy_to_uninitialized);
        if (didit) {
          // Present the results of the block-copying fast call.
          result_region->init_req(bcopy_path, local_ctrl);
          result_i_o   ->init_req(bcopy_path, local_io);
          result_memory->init_req(bcopy_path, local_mem->memory_at(alias_idx));
        }
      }
      if (didit) {
        *ctrl = top();     // no regular fast path
      }
    }

    // Clear the tail, if any.
    if (tail_ctl != nullptr) {
      Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl;
      *ctrl = tail_ctl;
      if (notail_ctl == nullptr) {
        generate_clear_array(*ctrl, mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             dest_tail, nullptr,
                             dest_size);
      } else {
        // Make a local merge.
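        // done_ctl/done_mem merge the path that had no tail to clear (notail_ctl)
        // with the path that clears the tail below.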
        Node* done_ctl = transform_later(new RegionNode(3));
        Node* done_mem = transform_later(new PhiNode(done_ctl, Type::MEMORY, adr_type));
        done_ctl->init_req(1, notail_ctl);
        done_mem->init_req(1, mem->memory_at(alias_idx));
        generate_clear_array(*ctrl, mem,
                             adr_type, dest,
                             default_value, raw_default_value,
                             basic_elem_type,
                             dest_tail, nullptr,
                             dest_size);
        done_ctl->init_req(2, *ctrl);
        done_mem->init_req(2, mem->memory_at(alias_idx));
        *ctrl = done_ctl;
        mem->set_memory_at(alias_idx, done_mem);
      }
    }
  }

  BasicType copy_type = basic_elem_type;
  assert(basic_elem_type != T_ARRAY, "caller must fix this");
  if (!(*ctrl)->is_top() && copy_type == T_OBJECT) {
    // If src and dest have compatible element types, we can copy bits.
    // Types S[] and D[] are compatible if D is a supertype of S.
    //
    // If they are not, we will use checked_oop_disjoint_arraycopy,
    // which performs a fast optimistic per-oop check, and backs off
    // further to JVM_ArrayCopy on the first per-oop check that fails.
    // (Actually, we don't move raw bits only; the GC requires card marks.)

    // We don't need a subtype check for validated copies and Object[].clone()
    bool skip_subtype_check = ac->is_arraycopy_validated() || ac->is_copyof_validated() ||
                              ac->is_copyofrange_validated() || ac->is_clone_oop_array();
    if (!skip_subtype_check) {
      // Get the klass* for both src and dest
      Node* src_klass  = ac->in(ArrayCopyNode::SrcKlass);
      Node* dest_klass = ac->in(ArrayCopyNode::DestKlass);

      assert(src_klass != nullptr && dest_klass != nullptr, "should have klasses");

      // Generate the subtype check.
      // This might fold up statically, or then again it might not.
      //
      // Non-static example:  Copying List<String>.elements to a new String[].
      // The backing store for a List<String> is always an Object[],
      // but its elements are always type String, if the generic types
      // are correct at the source level.
      //
      // Test S[] against D[], not S against D, because (probably)
      // the secondary supertype cache is less busy for S[] than S.
      // This usually only matters when D is an interface.
      Node* not_subtype_ctrl = Phase::gen_subtype_check(src_klass, dest_klass, ctrl, mem, _igvn, nullptr, -1);
      // Plug failing path into checked_oop_disjoint_arraycopy
      if (not_subtype_ctrl != top()) {
        Node* local_ctrl = not_subtype_ctrl;
        MergeMemNode* local_mem = MergeMemNode::make(mem);
        transform_later(local_mem);

        // (At this point we can assume disjoint_bases, since types differ.)
        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        Node* p1 = basic_plus_adr(dest_klass, ek_offset);
        Node* n1 = LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
        Node* dest_elem_klass = transform_later(n1);
        Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                                adr_type,
                                                dest_elem_klass,
                                                src, src_offset, dest, dest_offset,
                                                ConvI2X(copy_length), acopy_to_uninitialized);
        if (cv == nullptr)  cv = intcon(-1);  // failure (no stub available)
        checked_control = local_ctrl;
        checked_i_o     = *io;
        checked_mem     = local_mem->memory_at(alias_idx);
        checked_value   = cv;
      }
    }
    // At this point we know we do not need type checks on oop stores.
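    // Either the copy was already validated, or the failing subtype path was
    // routed into checked_oop_disjoint_arraycopy above, so the remaining fast
    // path stores oops without per-element checks.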

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (!bs->array_copy_requires_gc_barriers(alloc != nullptr, copy_type, false, false, BarrierSetC2::Expansion)) {
      // If we do not need gc barriers, copy using the jint or jlong stub.
      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
             "sizes agree");
    }
  }

  bool is_partial_array_copy = false;
  if (!(*ctrl)->is_top()) {
    // Generate the fast path, if possible.
    Node* local_ctrl = *ctrl;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);
    is_partial_array_copy = generate_unchecked_arraycopy(&local_ctrl, &local_mem,
                                                         adr_type, copy_type, disjoint_bases,
                                                         src, src_offset, dest, dest_offset,
                                                         ConvI2X(copy_length), acopy_to_uninitialized);

    // Present the results of the fast call.
    result_region->init_req(fast_path, local_ctrl);
    result_i_o   ->init_req(fast_path, *io);
    result_memory->init_req(fast_path, local_mem->memory_at(alias_idx));
  }

  // Here are all the slow paths up to this point, in one bundle:
  assert(slow_region != nullptr, "allocated on entry");
  slow_control = slow_region;
  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);

  *ctrl = checked_control;
  if (!(*ctrl)->is_top()) {
    // Clean up after the checked call.
    // The returned value is either 0 or -1^K,
    // where K = number of partially transferred array elements.
    Node* cmp = new CmpINode(checked_value, intcon(0));
    transform_later(cmp);
    Node* bol = new BoolNode(cmp, BoolTest::eq);
    transform_later(bol);
    IfNode* iff = new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN);
    transform_later(iff);

    // If it is 0, we are done, so transfer to the end.
    Node* checks_done = new IfTrueNode(iff);
    transform_later(checks_done);
    result_region->init_req(checked_path, checks_done);
    result_i_o   ->init_req(checked_path, checked_i_o);
    result_memory->init_req(checked_path, checked_mem);

    // If it is not zero, merge into the slow call.
    *ctrl = new IfFalseNode(iff);
    transform_later(*ctrl);
    RegionNode* slow_reg2 = new RegionNode(3);
    PhiNode*    slow_i_o2 = new PhiNode(slow_reg2, Type::ABIO);
    PhiNode*    slow_mem2 = new PhiNode(slow_reg2, Type::MEMORY, adr_type);
    transform_later(slow_reg2);
    transform_later(slow_i_o2);
    transform_later(slow_mem2);
    slow_reg2->init_req(1, slow_control);
    slow_i_o2->init_req(1, slow_i_o);
    slow_mem2->init_req(1, slow_mem);
    slow_reg2->init_req(2, *ctrl);
    slow_i_o2->init_req(2, checked_i_o);
    slow_mem2->init_req(2, checked_mem);

    slow_control = slow_reg2;
    slow_i_o = slow_i_o2;
    slow_mem = slow_mem2;

    if (alloc != nullptr) {
      // We'll restart from the very beginning, after zeroing the whole thing.
      // This can cause double writes, but that's OK since dest is brand new.
      // So we ignore the low 31 bits of the value returned from the stub.
    } else {
      // We must continue the copy exactly where it failed, or else
      // another thread might see the wrong number of writes to dest.
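      // checked_value is -1^K, so XOR with -1 below recovers K, the number of
      // elements the checked stub already copied; the offsets and remaining
      // length are adjusted by that amount before the slow call.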
      Node* checked_offset = new XorINode(checked_value, intcon(-1));
      Node* slow_offset    = new PhiNode(slow_reg2, TypeInt::INT);
      transform_later(checked_offset);
      transform_later(slow_offset);
      slow_offset->init_req(1, intcon(0));
      slow_offset->init_req(2, checked_offset);

      // Adjust the arguments by the conditionally incoming offset.
      Node* src_off_plus  = new AddINode(src_offset,  slow_offset);
      transform_later(src_off_plus);
      Node* dest_off_plus = new AddINode(dest_offset, slow_offset);
      transform_later(dest_off_plus);
      Node* length_minus  = new SubINode(copy_length, slow_offset);
      transform_later(length_minus);

      // Tweak the node variables to adjust the code produced below:
      src_offset  = src_off_plus;
      dest_offset = dest_off_plus;
      copy_length = length_minus;
    }
  }
  *ctrl = slow_control;
  if (!(*ctrl)->is_top()) {
    Node* local_ctrl = *ctrl, *local_io = slow_i_o;
    MergeMemNode* local_mem = MergeMemNode::make(mem);
    transform_later(local_mem);

    // Generate the slow path, if needed.
    local_mem->set_memory_at(alias_idx, slow_mem);

    if (dest_needs_zeroing) {
      generate_clear_array(local_ctrl, local_mem,
                           adr_type, dest,
                           default_value, raw_default_value,
                           basic_elem_type,
                           intcon(0), nullptr,
                           alloc->in(AllocateNode::AllocSize));
    }

    local_mem = generate_slow_arraycopy(ac,
                                        &local_ctrl, local_mem, &local_io,
                                        adr_type,
                                        src, src_offset, dest, dest_offset,
                                        copy_length, /*dest_uninitialized*/false);

    result_region->init_req(slow_call_path, local_ctrl);
    result_i_o   ->init_req(slow_call_path, local_io);
    result_memory->init_req(slow_call_path, local_mem->memory_at(alias_idx));
  } else {
    ShouldNotReachHere(); // no call to generate_slow_arraycopy:
                          // projections were not extracted
  }

  // Remove unused edges.
  for (uint i = 1; i < result_region->req(); i++) {
    if (result_region->in(i) == nullptr) {
      result_region->init_req(i, top());
    }
  }

  // Finished; return the combined state.
  *ctrl = result_region;
  *io   = result_i_o;
  mem->set_memory_at(alias_idx, result_memory);

  // mem no longer guaranteed to stay a MergeMemNode
  Node* out_mem = mem;
  DEBUG_ONLY(mem = nullptr);

  // The memory edges above are precise in order to model effects around
  // array copies accurately to allow value numbering of field loads around
  // arraycopy.  Such field loads, both before and after, are common in Java
  // collections and similar classes involving header/array data structures.
  //
  // But with a low number of registers, or when some registers are used or
  // killed by arraycopy calls, this causes register spilling on the stack.
  // See 6544710. The next memory barrier is added to avoid it. If the
  // arraycopy can be optimized away (which it can, sometimes) then we can
  // manually remove the membar also.
  //
  // Do not let reads from the cloned object float above the arraycopy.
  if (alloc != nullptr && !alloc->initialization()->does_not_escape()) {
    // Do not let stores that initialize this object be reordered with
    // a subsequent store that would make this object accessible by
    // other threads.
    insert_mem_bar(ctrl, &out_mem, Op_MemBarStoreStore);
  } else {
    insert_mem_bar(ctrl, &out_mem, Op_MemBarCPUOrder);
  }

  if (is_partial_array_copy) {
    assert((*ctrl)->is_Proj(), "MemBar control projection");
    assert((*ctrl)->in(0)->isa_MemBar(), "MemBar node");
    (*ctrl)->in(0)->isa_MemBar()->set_trailing_partial_array_copy();
  }

  _igvn.replace_node(_callprojs->fallthrough_memproj, out_mem);
  if (_callprojs->fallthrough_ioproj != nullptr) {
    _igvn.replace_node(_callprojs->fallthrough_ioproj, *io);
  }
  _igvn.replace_node(_callprojs->fallthrough_catchproj, *ctrl);

#ifdef ASSERT
  const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr();
  if (dest_t->is_known_instance() && !is_partial_array_copy) {
    ArrayCopyNode* ac = nullptr;
    assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost");
    assert(ac == nullptr, "no arraycopy anymore");
  }
#endif

  return out_mem;
}

// Helper for initialization of arrays, creating a ClearArray.
// It writes zero bits in [start..end), within the body of an array object.
// The memory effects are all chained onto the 'adr_type' alias category.
//
// Since the object is otherwise uninitialized, we are free
// to put a little "slop" around the edges of the cleared area,
// as long as it does not go back into the array's header,
// or beyond the array end within the heap.
//
// The lower edge can be rounded down to the nearest jint and the
// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
//
// Arguments:
//   adr_type           memory slice where writes are generated
//   dest               oop of the destination array
//   basic_elem_type    element type of the destination
//   slice_idx          array index of first element to store
//   slice_len          number of elements to store (or null)
//   dest_size          total size in bytes of the array object
//
// Exactly one of slice_len or dest_size must be non-null.
// If dest_size is non-null, zeroing extends to the end of the object.
// If slice_len is non-null, the slice_idx value must be a constant.
void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem,
                                            const TypePtr* adr_type,
                                            Node* dest,
                                            Node* val,
                                            Node* raw_val,
                                            BasicType basic_elem_type,
                                            Node* slice_idx,
                                            Node* slice_len,
                                            Node* dest_size) {
  // one or the other but not both of slice_len and dest_size:
  assert((slice_len != nullptr ? 1 : 0) + (dest_size != nullptr ?
         1 : 0) == 1, "");
  if (slice_len == nullptr)  slice_len = top();
  if (dest_size == nullptr)  dest_size = top();

  uint alias_idx = C->get_alias_index(adr_type);

  // operate on this memory slice:
  Node* mem = merge_mem->memory_at(alias_idx); // memory slice to operate on

  // scaling and rounding of indexes:
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
  int bump_bit  = (-1 << scale) & BytesPerInt;

  // determine constant starts and ends
  const intptr_t BIG_NEG = -128;
  assert(BIG_NEG + 2*abase < 0, "neg enough");
  intptr_t slice_idx_con = (intptr_t) _igvn.find_int_con(slice_idx, BIG_NEG);
  intptr_t slice_len_con = (intptr_t) _igvn.find_int_con(slice_len, BIG_NEG);
  if (slice_len_con == 0) {
    return;                     // nothing to do here
  }
  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
  intptr_t end_con   = _igvn.find_intptr_t_con(dest_size, -1);
  if (slice_idx_con >= 0 && slice_len_con >= 0) {
    assert(end_con < 0, "not two cons");
    end_con = align_up(abase + ((slice_idx_con + slice_len_con) << scale),
                       BytesPerLong);
  }

  if (start_con >= 0 && end_con >= 0) {
    // Constant start and end.  Simple.
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end_con, &_igvn);
  } else if (start_con >= 0 && dest_size != top()) {
    // Constant start, pre-rounded end after the tail of the array.
    Node* end = dest_size;
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end, &_igvn);
  } else if (start_con >= 0 && slice_len != top()) {
    // Constant start, non-constant end.  End needs rounding up.
    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
    intptr_t end_base  = abase + (slice_idx_con << scale);
    int      end_round = (-1 << scale) & (BytesPerLong - 1);
    Node*    end       = ConvI2X(slice_len);
    if (scale != 0)
      end = transform_later(new LShiftXNode(end, intcon(scale) ));
    end_base += end_round;
    end = transform_later(new AddXNode(end, MakeConX(end_base)) );
    end = transform_later(new AndXNode(end, MakeConX(~end_round)) );
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, val, raw_val,
                                       start_con, end, &_igvn);
  } else if (start_con < 0 && dest_size != top()) {
    // Non-constant start, pre-rounded end after the tail of the array.
    // This is almost certainly a "round-to-end" operation.
    Node* start = slice_idx;
    start = ConvI2X(start);
    if (scale != 0)
      start = transform_later(new LShiftXNode( start, intcon(scale) ));
    start = transform_later(new AddXNode(start, MakeConX(abase)) );
    if ((bump_bit | clear_low) != 0) {
      int to_clear = (bump_bit | clear_low);
      // Align up mod 8, then store a jint zero unconditionally
      // just before the mod-8 boundary.
      if (((abase + bump_bit) & ~to_clear) - bump_bit
          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
        bump_bit = 0;
        assert((abase & to_clear) == 0, "array base must be long-aligned");
      } else {
        // Bump 'start' up to (or past) the next jint boundary:
        start = transform_later( new AddXNode(start, MakeConX(bump_bit)) );
        assert((abase & clear_low) == 0, "array base must be int-aligned");
      }
      // Round bumped 'start' down to jlong boundary in body of array.
      start = transform_later(new AndXNode(start, MakeConX(~to_clear)) );
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = transform_later(new AddXNode(start, MakeConX(-bump_bit)) );
        Node* p1 = basic_plus_adr(dest, x1);
        if (val == nullptr) {
          assert(raw_val == nullptr, "val may not be null");
          mem = StoreNode::make(_igvn, ctrl, mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
        } else {
          assert(_igvn.type(val)->isa_narrowoop(), "should be narrow oop");
          mem = new StoreNNode(ctrl, mem, p1, adr_type, val, MemNode::unordered);
        }
        mem = transform_later(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(ctrl, mem, dest, raw_val,
                                       start, end, &_igvn);
  } else {
    // Non-constant start, unrounded non-constant end.
    // (Nobody zeroes a random midsection of an array using this routine.)
    ShouldNotReachHere();       // fix caller
  }

  // Done.
  merge_mem->set_memory_at(alias_idx, mem);
}

bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, Node* io,
                                                const TypePtr* adr_type,
                                                BasicType basic_elem_type,
                                                AllocateNode* alloc,
                                                Node* src,  Node* src_offset,
                                                Node* dest, Node* dest_offset,
                                                Node* dest_size, bool dest_uninitialized) {
  // See if there is an advantage from block transfer.
  int scale = exact_log2(type2aelembytes(basic_elem_type));
  if (scale >= LogBytesPerLong)
    return false;               // it is already a block transfer

  // Look at the alignment of the starting offsets.
  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);

  intptr_t src_off_con  = (intptr_t) _igvn.find_int_con(src_offset, -1);
  intptr_t dest_off_con = (intptr_t) _igvn.find_int_con(dest_offset, -1);
  if (src_off_con < 0 || dest_off_con < 0) {
    // At present, we can only understand constants.
    return false;
  }

  intptr_t src_off  = abase + (src_off_con  << scale);
  intptr_t dest_off = abase + (dest_off_con << scale);

  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
    // Non-aligned; too bad.
    // One more chance:  Pick off an initial 32-bit word.
    // This is a common case, since abase can be odd mod 8.
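    // Copying a single jint here realigns both src_off and dest_off to an
    // 8-byte boundary (they are congruent mod 8 in the case below), so the
    // remainder of the copy can proceed as a long-sized block transfer.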
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src,  src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
      const TypePtr* s_adr_type = _igvn.type(sptr)->is_ptr();
      assert(s_adr_type->isa_aryptr(), "impossible slice");
      uint s_alias_idx = C->get_alias_index(s_adr_type);
      uint d_alias_idx = C->get_alias_index(adr_type);
      bool is_mismatched = (basic_elem_type != T_INT);
      Node* sval = transform_later(
          LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
                         TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
                         false /*require_atomic_access*/, false /*unaligned*/, is_mismatched));
      Node* st = transform_later(
          StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(d_alias_idx), dptr, adr_type,
                          sval, T_INT, MemNode::unordered));
      if (is_mismatched) {
        st->as_Store()->set_mismatched_access();
      }
      (*mem)->set_memory_at(d_alias_idx, st);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
  }
  assert(src_off % BytesPerLong == 0, "");
  assert(dest_off % BytesPerLong == 0, "");

  // Do this copy by giant steps.
  Node* sptr  = basic_plus_adr(src,  src_off);
  Node* dptr  = basic_plus_adr(dest, dest_off);
  Node* countx = dest_size;
  countx = transform_later(new SubXNode(countx, MakeConX(dest_off)));
  countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong)));

  bool disjoint_bases = true;   // since alloc isn't null
  generate_unchecked_arraycopy(ctrl, mem,
                               adr_type, T_LONG, disjoint_bases,
                               sptr, nullptr, dptr, nullptr, countx, dest_uninitialized);

  return true;
}

// Helper function; generates code for the slow case.
// We make a call to a runtime method which emulates the native method,
// but without the native wrapper overhead.
MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac,
                                                        Node** ctrl, Node* mem, Node** io,
                                                        const TypePtr* adr_type,
                                                        Node* src,  Node* src_offset,
                                                        Node* dest, Node* dest_offset,
                                                        Node* copy_length, bool dest_uninitialized) {
  assert(!dest_uninitialized, "Invariant");

  const TypeFunc* call_type = OptoRuntime::slow_arraycopy_Type();
  CallNode* call = new CallStaticJavaNode(call_type, OptoRuntime::slow_arraycopy_Java(),
                                          "slow_arraycopy", TypePtr::BOTTOM);

  call->init_req(TypeFunc::Control, *ctrl);
  call->init_req(TypeFunc::I_O    , *io);
  call->init_req(TypeFunc::Memory , mem);
  call->init_req(TypeFunc::ReturnAdr, top());
  call->init_req(TypeFunc::FramePtr, top());
  call->init_req(TypeFunc::Parms+0, src);
  call->init_req(TypeFunc::Parms+1, src_offset);
  call->init_req(TypeFunc::Parms+2, dest);
  call->init_req(TypeFunc::Parms+3, dest_offset);
  call->init_req(TypeFunc::Parms+4, copy_length);
  call->copy_call_debug_info(&_igvn, ac);

  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
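  // Replace the ArrayCopyNode with the runtime call, then re-extract the call's
  // projections so the caller can wire up its control, memory and I/O edges.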
  _igvn.replace_node(ac, call);
  transform_later(call);

  _callprojs = call->extract_projections(false /*separate_io_proj*/, false /*do_asserts*/);
  *ctrl = _callprojs->fallthrough_catchproj->clone();
  transform_later(*ctrl);

  Node* m = _callprojs->fallthrough_memproj->clone();
  transform_later(m);

  uint alias_idx = C->get_alias_index(adr_type);
  MergeMemNode* out_mem;
  if (alias_idx != Compile::AliasIdxBot) {
    out_mem = MergeMemNode::make(mem);
    out_mem->set_memory_at(alias_idx, m);
  } else {
    out_mem = MergeMemNode::make(m);
  }
  transform_later(out_mem);

  // When src is negative and arraycopy is before an infinite loop, _callprojs->fallthrough_ioproj
  // could be nullptr. Skip clone and update nullptr fallthrough_ioproj.
  if (_callprojs->fallthrough_ioproj != nullptr) {
    *io = _callprojs->fallthrough_ioproj->clone();
    transform_later(*io);
  } else {
    *io = nullptr;
  }

  return out_mem;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                     const TypePtr* adr_type,
                                                     Node* dest_elem_klass,
                                                     Node* src,  Node* src_offset,
                                                     Node* dest, Node* dest_offset,
                                                     Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top())  return nullptr;

  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
  if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
    return nullptr;
  }

  // Pick out the parameters required to perform a store-check
  // for the target array.  This is an optimistic check.  It will
  // look in each non-null element's class, at the desired klass's
  // super_check_offset, for the desired klass.
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
  Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
  Node* check_offset = ConvI2X(transform_later(n3));
  Node* check_value  = dest_elem_klass;

  Node* src_start  = array_element_address(src, src_offset, T_OBJECT);
  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);

  const TypeFunc* call_type = OptoRuntime::checkcast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "checkcast_arraycopy", adr_type,
                              src_start, dest_start, copy_length XTOP, check_offset XTOP, check_value);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates code for cases requiring runtime checks.
Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                   const TypePtr* adr_type,
                                                   Node* src,  Node* src_offset,
                                                   Node* dest, Node* dest_offset,
                                                   Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return nullptr;
  assert(!dest_uninitialized, "Invariant");

  address copyfunc_addr = StubRoutines::generic_arraycopy();
  if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path.
    return nullptr;
  }

  const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, "generic_arraycopy", adr_type,
                              src, src_offset, dest, dest_offset, copy_length);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  Node* proj = new ProjNode(call, TypeFunc::Parms);
  transform_later(proj);

  return proj;
}

// Helper function; generates the fast out-of-line call to an arraycopy stub.
bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** mem,
                                                    const TypePtr* adr_type,
                                                    BasicType basic_elem_type,
                                                    bool disjoint_bases,
                                                    Node* src,  Node* src_offset,
                                                    Node* dest, Node* dest_offset,
                                                    Node* copy_length, bool dest_uninitialized) {
  if ((*ctrl)->is_top()) return false;

  Node* src_start  = src;
  Node* dest_start = dest;
  if (src_offset != nullptr || dest_offset != nullptr) {
    src_start  = array_element_address(src, src_offset, basic_elem_type);
    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
  }

  // Figure out which arraycopy runtime method to call.
  const char* copyfunc_name = "arraycopy";
  address     copyfunc_addr =
      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
                          disjoint_bases, copyfunc_name, dest_uninitialized);

  Node* result_memory = nullptr;
  RegionNode* exit_block = nullptr;
  if (ArrayOperationPartialInlineSize > 0 && is_subword_type(basic_elem_type) &&
      Matcher::vector_width_in_bytes(basic_elem_type) >= 16) {
    generate_partial_inlining_block(ctrl, mem, adr_type, &exit_block, &result_memory,
                                    copy_length, src_start, dest_start, basic_elem_type);
  }

  const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type();
  Node* call = make_leaf_call(*ctrl, *mem, call_type, copyfunc_addr, copyfunc_name, adr_type,
                              src_start, dest_start, copy_length XTOP);

  finish_arraycopy_call(call, ctrl, mem, adr_type);

  // Connecting remaining edges for exit_block coming from stub_block.
  if (exit_block) {
    exit_block->init_req(2, *ctrl);

    // Memory edge corresponding to stub_region.
    result_memory->init_req(2, *mem);

    uint alias_idx = C->get_alias_index(adr_type);
    if (alias_idx != Compile::AliasIdxBot) {
      *mem = MergeMemNode::make(*mem);
      (*mem)->set_memory_at(alias_idx, result_memory);
    } else {
      *mem = MergeMemNode::make(result_memory);
    }
    transform_later(*mem);
    *ctrl = exit_block;
    return true;
  }
  return false;
}

const TypePtr* PhaseMacroExpand::adjust_for_flat_array(const TypeAryPtr* top_dest, Node*& src_offset,
                                                       Node*& dest_offset, Node*& length, BasicType& dest_elem,
                                                       Node*& dest_length) {
#ifdef ASSERT
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bool needs_barriers = top_dest->elem()->inline_klass()->contains_oops() &&
                        bs->array_copy_requires_gc_barriers(dest_length != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization);
  assert(!needs_barriers || StressReflectiveCode, "Flat arraycopy would require GC barriers");
#endif
  int elem_size = top_dest->flat_elem_size();
  if (elem_size >= 8) {
    if (elem_size > 8) {
      // treat as array of long but scale length, src offset and dest offset
      assert((elem_size % 8) == 0, "not a power of 2?");
      int factor = elem_size / 8;
      length = transform_later(new MulINode(length, intcon(factor)));
      src_offset = transform_later(new MulINode(src_offset, intcon(factor)));
      dest_offset = transform_later(new MulINode(dest_offset, intcon(factor)));
      if (dest_length != nullptr) {
        dest_length = transform_later(new MulINode(dest_length, intcon(factor)));
      }
      elem_size = 8;
    }
    dest_elem = T_LONG;
  } else if (elem_size == 4) {
    dest_elem = T_INT;
  } else if (elem_size == 2) {
    dest_elem = T_CHAR;
  } else if (elem_size == 1) {
    dest_elem = T_BYTE;
  } else {
    ShouldNotReachHere();
  }
  return TypeRawPtr::BOTTOM;
}

#undef XTOP

void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
  Node* ctrl = ac->in(TypeFunc::Control);
  Node* io = ac->in(TypeFunc::I_O);
  Node* src = ac->in(ArrayCopyNode::Src);
  Node* src_offset = ac->in(ArrayCopyNode::SrcPos);
  Node* dest = ac->in(ArrayCopyNode::Dest);
  Node* dest_offset = ac->in(ArrayCopyNode::DestPos);
  Node* length = ac->in(ArrayCopyNode::Length);
  MergeMemNode* merge_mem = nullptr;

  if (ac->is_clonebasic()) {
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    bs->clone_at_expansion(this, ac);
    return;
  } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_clone_oop_array()) {
    const Type* src_type = _igvn.type(src);
    const Type* dest_type = _igvn.type(dest);
    const TypeAryPtr* top_src = src_type->isa_aryptr();
    const TypeAryPtr* top_dest = dest_type->isa_aryptr();
    BasicType dest_elem = T_OBJECT;
    if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
      dest_elem = top_dest->elem()->array_element_basic_type();
    }
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (top_src != nullptr && top_src->is_flat()) {
      // If src is flat, dest is guaranteed to be flat as well
      top_dest = top_src;
    }

    AllocateArrayNode* alloc = nullptr;
    Node* dest_length = nullptr;
    if (ac->is_alloc_tightly_coupled()) {
      alloc = AllocateArrayNode::Ideal_array_allocation(dest);
      assert(alloc != nullptr, "expect alloc");
      dest_length = alloc->in(AllocateNode::ALength);
    }

    Node* mem = ac->in(TypeFunc::Memory);
    const TypePtr* adr_type = nullptr;
    if (top_dest->is_flat()) {
      assert(dest_length != nullptr || StressReflectiveCode, "must be tightly coupled");
      // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
      // on all slices to prevent writes into the source from floating below the arraycopy.
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
      adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
    } else {
      adr_type = dest_type->is_oopptr()->add_offset(Type::OffsetBot);
      if (ac->_dest_type != TypeOopPtr::BOTTOM) {
        adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
      }
      if (ac->_src_type != ac->_dest_type) {
        adr_type = TypeRawPtr::BOTTOM;
      }
    }
    merge_mem = MergeMemNode::make(mem);
    transform_later(merge_mem);

    generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                       adr_type, dest_elem,
                       src, src_offset, dest, dest_offset, length,
                       dest_length,
                       true, ac->has_negative_length_guard());

    return;
  }

  AllocateArrayNode* alloc = nullptr;
  if (ac->is_alloc_tightly_coupled()) {
    alloc = AllocateArrayNode::Ideal_array_allocation(dest);
    assert(alloc != nullptr, "expect alloc");
  }

  assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy");

  // Compile time checks. If any of these checks cannot be verified at compile time,
  // we do not make a fast path for this call. Instead, we let the call remain as it
  // is. The checks we choose to mandate at compile time are:
  //
  // (1) src and dest are arrays.
  const Type* src_type = src->Value(&_igvn);
  const Type* dest_type = dest->Value(&_igvn);
  const TypeAryPtr* top_src = src_type->isa_aryptr();
  const TypeAryPtr* top_dest = dest_type->isa_aryptr();

  BasicType src_elem = T_CONFLICT;
  BasicType dest_elem = T_CONFLICT;

  if (top_src != nullptr && top_src->elem() != Type::BOTTOM) {
    src_elem = top_src->elem()->array_element_basic_type();
  }
  if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) {
    dest_elem = top_dest->elem()->array_element_basic_type();
  }
  if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
  if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

  if (ac->is_arraycopy_validated() && dest_elem != T_CONFLICT && src_elem == T_CONFLICT) {
    src_elem = dest_elem;
  }

  if (src_elem == T_CONFLICT || dest_elem == T_CONFLICT) {
    // Conservatively insert a memory barrier on all memory slices.
    // Do not let writes into the source float below the arraycopy.
    {
      Node* mem = ac->in(TypeFunc::Memory);
      insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);

      merge_mem = MergeMemNode::make(mem);
      transform_later(merge_mem);
    }

    // Call StubRoutines::generic_arraycopy stub.
    Node* mem = generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io,
                                   TypeRawPtr::BOTTOM, T_CONFLICT,
                                   src, src_offset, dest, dest_offset, length,
                                   nullptr,
                                   // If a negative length guard was generated for the ArrayCopyNode,
                                   // the length of the array can never be negative.
                                   false, ac->has_negative_length_guard());
    return;
  }

  assert(!ac->is_arraycopy_validated() || (src_elem == dest_elem && dest_elem != T_VOID), "validated but different basic types");

  // (2) src and dest arrays must have elements of the same BasicType
  // Figure out the size and type of the elements we will be copying.
  //
  // We have no stub to copy flat inline type arrays with oop
  // fields if we need to emit write barriers.
  //
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  if (src_elem != dest_elem || top_src->is_flat() != top_dest->is_flat() || dest_elem == T_VOID ||
      (top_src->is_flat() && top_dest->elem()->inline_klass()->contains_oops() &&
       bs->array_copy_requires_gc_barriers(alloc != nullptr, T_OBJECT, false, false, BarrierSetC2::Optimization))) {
    // The component types are not the same or are not recognized. Punt.
    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
    {
      Node* mem = ac->in(TypeFunc::Memory);
      merge_mem = generate_slow_arraycopy(ac, &ctrl, mem, &io, TypePtr::BOTTOM, src, src_offset, dest, dest_offset, length, false);
    }

    _igvn.replace_node(_callprojs->fallthrough_memproj, merge_mem);
    if (_callprojs->fallthrough_ioproj != nullptr) {
      _igvn.replace_node(_callprojs->fallthrough_ioproj, io);
    }
    _igvn.replace_node(_callprojs->fallthrough_catchproj, ctrl);
    return;
  }

  //---------------------------------------------------------------------------
  // We will make a fast path for this call to arraycopy.

  // We have the following tests left to perform:
  //
  // (3) src and dest must not be null.
  // (4) src_offset must not be negative.
  // (5) dest_offset must not be negative.
  // (6) length must not be negative.
  // (7) src_offset + length must not exceed length of src.
  // (8) dest_offset + length must not exceed length of dest.
  // (9) each element of an oop array must be assignable

  Node* mem = ac->in(TypeFunc::Memory);
  if (top_dest->is_flat()) {
    // Copy to a flat array modifies multiple memory slices. Conservatively insert a barrier
    // on all slices to prevent writes into the source from floating below the arraycopy.
    insert_mem_bar(&ctrl, &mem, Op_MemBarCPUOrder);
  }
  merge_mem = MergeMemNode::make(mem);
  transform_later(merge_mem);

  RegionNode* slow_region = new RegionNode(1);
  transform_later(slow_region);

  if (!ac->is_arraycopy_validated()) {
    // (3) operands must not be null
    // We currently perform our null checks with the null_check routine.
    // This means that the null exceptions will be reported in the caller
    // rather than (correctly) reported inside of the native arraycopy call.
    // This should be corrected, given time. We do our null check with the
    // stack pointer restored.
    // Null checks are done in library_call.cpp.

    // (4) src_offset must not be negative.
    generate_negative_guard(&ctrl, src_offset, slow_region);

    // (5) dest_offset must not be negative.
    generate_negative_guard(&ctrl, dest_offset, slow_region);

    // (6) length must not be negative (moved to generate_arraycopy()).
    // generate_negative_guard(length, slow_region);

    // (7) src_offset + length must not exceed length of src.
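    // Note: generate_limit_guard() compares (offset + length) against the array length with
    // an unsigned compare, so a sum that overflows the int range also takes the slow path.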
    Node* alen = ac->in(ArrayCopyNode::SrcLen);
    assert(alen != nullptr, "need src len");
    generate_limit_guard(&ctrl,
                         src_offset, length,
                         alen,
                         slow_region);

    // (8) dest_offset + length must not exceed length of dest.
    alen = ac->in(ArrayCopyNode::DestLen);
    assert(alen != nullptr, "need dest len");
    generate_limit_guard(&ctrl,
                         dest_offset, length,
                         alen,
                         slow_region);

    // (9) each element of an oop array must be assignable
    // The generate_arraycopy subroutine checks this.

    // Handle inline type arrays
    if (!top_src->is_flat()) {
      if (UseFlatArray && !top_src->is_not_flat()) {
        // Src might be flat and dest might not be flat. Go to the slow path if src is flat.
        generate_flat_array_guard(&ctrl, src, merge_mem, slow_region);
      }
      if (EnableValhalla) {
        // No validation. The subtype check emitted at macro expansion time will not go to the slow
        // path but call checkcast_arraycopy which cannot handle flat/null-free inline type arrays.
        generate_null_free_array_guard(&ctrl, dest, merge_mem, slow_region);
      }
    } else {
      assert(top_dest->is_flat(), "dest array must be flat");
    }
  }

  // This is where the memory effects are placed:
  const TypePtr* adr_type = nullptr;
  Node* dest_length = (alloc != nullptr) ? alloc->in(AllocateNode::ALength) : nullptr;

  if (top_dest->is_flat()) {
    adr_type = adjust_for_flat_array(top_dest, src_offset, dest_offset, length, dest_elem, dest_length);
  } else if (ac->_dest_type != TypeOopPtr::BOTTOM) {
    adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr();
  } else {
    adr_type = TypeAryPtr::get_array_body_type(dest_elem);
  }

  generate_arraycopy(ac, alloc, &ctrl, merge_mem, &io,
                     adr_type, dest_elem,
                     src, src_offset, dest, dest_offset, length,
                     dest_length,
                     // If a negative length guard was generated for the ArrayCopyNode,
                     // the length of the array can never be negative.
                     false, ac->has_negative_length_guard(),
                     slow_region);
}