/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }
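
// Factory method: creates the ArrayCopyNode, wires up the standard
// runtime-call inputs plus the arraycopy-specific ones (Src/SrcPos,
// Dest/DestPos, Length, and the klass and length inputs used for
// argument validation). If may_throw is true, the I/O state and
// safepoint edges are attached so the call can throw or deoptimize.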
ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O, kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

void ArrayCopyNode::connect_outputs(GraphKit* kit, bool deoptimize_on_exception) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true, deoptimize_on_exception);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is easier to
      // simply skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // Clone passes a length as a rounded number of longs. If we're
      // cloning an array, we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) might lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}
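
// The two helpers below emit a single scalar access through the GC barrier
// set (BarrierSetC2::load_at/store_at) so that any required read/write
// barriers are expanded along with the access. ctl is passed by reference
// because barrier expansion may update the control the access depends on.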
Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}
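
// Expand a basic clone of an instance (not an array) into a sequence of
// per-field loads and stores. Returns nullptr if this transformation does
// not apply, NodeSentinel if it was attempted but failed (the caller must
// give up on the transform), and the resulting memory state on success.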
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }

  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset_in_bytes());
    Node* next_src = phase->transform(new AddPNode(base_src, base_src, off));
    Node* next_dest = phase->transform(new AddPNode(base_dest, base_dest, off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}
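
// Compute the base and first-element addresses for source and destination,
// as well as the copy's element type and whether the bases are known to be
// disjoint. Returns false when the copy cannot be expanded into explicit
// loads and stores: unknown array types, mismatched element types, a GC
// that requires barriers for this copy, or offsets known to be out of bounds.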
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  base_src = in(ArrayCopyNode::Src);
  base_dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(base_src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // A newly allocated object is guaranteed not to overlap with the source object
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift  = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

    // Temporarily hook dest_offset so it is not reclaimed as dead while
    // src_scale is transformed.
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));
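
    // The address of element i is base + header + (i << shift). As a worked
    // example (assuming a 4-byte element type, so shift == 2, and whatever
    // header size arrayOopDesc::base_offset_in_bytes() reports for the
    // platform): with src_offset == 3, adr_src points at
    // base_src + header + 12.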
    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) {
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into load-stores, the address is adjusted
    // to point at where the array payload starts.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}

void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node* bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}
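
// When the source and destination may be the same array, the copy direction
// matters. For example, arraycopy(a, 0, a, 1, 2) copied forward would store
// into a[1] before reading it; copying the elements in reverse order reads
// each one before it is clobbered. This is why array_copy_test_overlap()
// routes SrcPos < DestPos to backward_ctl and everything else to forward_ctl.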
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        Node* mem,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    MergeMemNode* mm = MergeMemNode::make(mem);

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         Node* mem,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    MergeMemNode* mm = MergeMemNode::make(mem);

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
      for (int i = count-1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}
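
// Rewire the graph once the copy has been expanded: the fallthrough control,
// memory and I/O projections of the ArrayCopyNode are replaced by the new
// state produced by the loads and stores. Returns false if the node cannot
// be replaced yet (e.g. an unexpected memory shape when card marking is
// required, or new control at parse time), in which case the ArrayCopyNode
// is left in place.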
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.
      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}

Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores.
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  //   need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Src) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

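  // count is the number of non-static fields for an instance clone, or the
  // constant number of elements to copy otherwise (see get_count()). The
  // expansion into individual loads and stores only pays off for small,
  // compile-time-known counts.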
  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, _src_type, src);
  const TypePtr* atp_dest = get_address_type(phase, _dest_type, dest);
  Node* in_mem = in(TypeFunc::Memory);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem,
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem,
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = nullptr;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}
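
// Recognize a node that may write to t_oop as part of an array copy: either
// an ArrayCopyNode itself or a direct call to one of the arraycopy stubs.
// On a match, the call is returned through the out parameter.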
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr* t_oop, Node* n, PhaseValues* phase, CallNode*& call) {
  if (n != nullptr &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac) {
  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // step over a G1 GC barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = nullptr;
  guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != nullptr) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != nullptr && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  } else if (mb->trailing_partial_array_copy()) {
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array?
// If must_modify is false, return true if the copy could write
// between offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi.
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;
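
  // The copy writes the byte range [dest_pos * elemsize + header,
  // (dest_pos + len) * elemsize + header). With dest_pos and len only known
  // as intervals, [dest_pos_hi, dest_pos_plus_len_lo) is guaranteed to be
  // written and [dest_pos_lo, dest_pos_plus_len_hi) may be written. As a
  // worked example (assuming an int array, elemsize == 4, and some header
  // size H): dest_pos in [1,2] and len in [3,5] give a guaranteed range of
  // [H+8, H+16) and a possible range of [H+4, H+28).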
  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimum vector size for a copy length known
// at compile time. For example (assuming ArrayOperationPartialInlineSize ==
// 32 and T_INT elements): the default is 32/4 == 8 lanes, but a constant
// length of 3 elements is only 12 bytes, so 16/4 == 4 lanes suffice.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    else if (size_in_bytes > 16 && size_in_bytes <= 32)
      lane_count = 32/type2aelembytes(type);
  }
  return lane_count;
}