/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O , kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

void ArrayCopyNode::connect_outputs(GraphKit* kit, bool deoptimize_on_exception) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true, deoptimize_on_exception);
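  // The exception path has been wired up by make_slow_call_ex(); re-establish
  // the kit's memory state from the call for the normal fall-through path.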
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  if (is_clonebasic()) {
    Node* src = in(ArrayCopyNode::Src);
    const Type* src_type = phase->type(src);

    if (src_type == Type::TOP) {
      return -1;
    }

    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway so easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != nullptr, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length of the input array is constant, ArrayCopyNode::Length
      // must be too. Note that the opposite does not need to hold,
      // because different input array lengths (e.g. int arrays with
      // 3 or 4 elements) might lead to the same length input
      // (e.g. 2 double-words).
      assert(!ary_src->size()->is_con() || (get_length_if_constant(phase) >= 0) ||
             (UseFlatArray && ary_src->elem()->make_oopptr() != nullptr && ary_src->elem()->make_oopptr()->can_be_inline_type()) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally() || StressReflectiveCode, "inconsistent");
      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  // Pin the load: if this is an array load, it's going to be dependent on a condition that's not a range check for that
  // access. If that condition is replaced by an identical dominating one, then an unpinned load would risk floating
  // above runtime checks that guarantee it is within bounds.
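  // C2_UNKNOWN_CONTROL_LOAD gives the load an "unknown control" dependency, so
  // it remains pinned even if its controlling test is later replaced by an
  // identical dominating one.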
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY | C2_UNKNOWN_CONTROL_LOAD;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}


Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return nullptr;
  }

  Node* base_src = in(ArrayCopyNode::Src);
  Node* base_dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(base_src);
  const TypeInstPtr* inst_src = src_type->isa_instptr();
  if (inst_src == nullptr) {
    return nullptr;
  }

  MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
  phase->record_for_igvn(mem);
  if (can_reshape) {
    phase->is_IterGVN()->_worklist.push(mem);
  }

  ciInstanceKlass* ik = inst_src->instance_klass();

  if (!inst_src->klass_is_exact()) {
    assert(!ik->is_interface(), "inconsistent klass hierarchy");
    if (ik->has_subklass()) {
      // Concurrent class loading.
      // Fail fast and return NodeSentinel to indicate that the transform failed.
      return NodeSentinel;
    } else {
      phase->C->dependencies()->assert_leaf_type(ik);
    }
  }

  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset_in_bytes());
    Node* next_src = phase->transform(new AddPNode(base_src, base_src, off));
    Node* next_dest = phase->transform(new AddPNode(base_dest, base_dest, off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  base_src = in(ArrayCopyNode::Src);
  base_dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(base_src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  Node* src_offset = in(ArrayCopyNode::SrcPos);
  Node* dest_offset = in(ArrayCopyNode::DestPos);

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(base_dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();

    // newly allocated object is guaranteed to not overlap with source object
    disjoint_bases = is_alloc_tightly_coupled();
    if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM ||
        ary_dest == nullptr || ary_dest->elem() == Type::BOTTOM) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->elem()->array_element_basic_type();
    BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
    if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
    if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || ary_src->is_flat() != ary_dest->is_flat() || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_dest->is_flat() && bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) ||
        (ary_dest->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), T_OBJECT, false, false, BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    value_type = ary_src->elem();

    uint shift = exact_log2(type2aelembytes(dest_elem));
    if (ary_dest->is_flat()) {
      shift = ary_src->flat_log_elem_size();
    }
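    // The element addresses below are computed as
    //   adr = base + (offset << shift) + header
    // where shift is log2 of the element size and header is the offset of the
    // first element in the array.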
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    if (src_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      if (can_reshape) {
        // record src_offset, so it can be deleted later (if it is dead)
        phase->is_IterGVN()->_worklist.push(src_offset);
      }
      return false;
    }

    // Hook dest_offset so it is not destroyed as dead code while src_scale is
    // transformed.
    Node* hook = new Node(1);
    hook->init_req(0, dest_offset);

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));

    hook->destruct(phase);

    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_scale));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    copy_type = dest_elem;
  } else {
    assert(ary_src != nullptr, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;

    if (ary_src->elem()->make_oopptr() != nullptr &&
        ary_src->elem()->make_oopptr()->can_be_inline_type()) {
      return false;
    }

    BasicType elem = ary_src->isa_aryptr()->elem()->array_element_basic_type();
    if (is_reference_type(elem, true)) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if ((!ary_src->is_flat() && bs->array_copy_requires_gc_barriers(true, elem, true, is_clone_inst(), BarrierSetC2::Optimization)) ||
        (ary_src->is_flat() && ary_src->elem()->inline_klass()->contains_oops() &&
         bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization))) {
      // It's an object array copy but we can't emit the card marking that is needed
      return false;
    }

    adr_src = phase->transform(new AddPNode(base_src, base_src, src_offset));
    adr_dest = phase->transform(new AddPNode(base_dest, base_dest, dest_offset));

    // The address is offset to an aligned address where a raw copy would start.
    // If the clone copy is decomposed into load/stores, the address is adjusted
    // to point at where the array starts.
    const Type* toff = phase->type(src_offset);
    int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
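    // diff is the number of bytes between where the raw copy would start and
    // the first array element.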
    int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }
    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot)->is_aryptr();
}

void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
  Node* ctl = kit.control();
  if (!disjoint_bases && count > 1) {
    PhaseGVN& gvn = kit.gvn();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != nullptr && dest_offset != nullptr, "should be");
    Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
    Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    gvn.transform(iff);

    kit.set_control(gvn.transform(new IfFalseNode(iff)));
    backward_ctl = gvn.transform(new IfTrueNode(iff));
  }
}

void ArrayCopyNode::copy(GraphKit& kit,
                         const TypeAryPtr* atp_src,
                         const TypeAryPtr* atp_dest,
                         int i,
                         Node* base_src,
                         Node* base_dest,
                         Node* adr_src,
                         Node* adr_dest,
                         BasicType copy_type,
                         const Type* value_type) {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  Node* ctl = kit.control();
  if (atp_dest->is_flat()) {
    ciInlineKlass* vk = atp_src->elem()->inline_klass();
    for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
      ciField* field = vk->nonstatic_field_at(j);
      int off_in_vt = field->offset_in_bytes() - vk->first_field_offset();
      Node* off = kit.MakeConX(off_in_vt + i * atp_src->flat_elem_size());
      ciType* ft = field->type();
      BasicType bt = type2field[ft->basic_type()];
      assert(!field->is_flat(), "flat field encountered");
      const Type* rt = Type::get_const_type(ft);
      const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
      assert(!bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), bt, false, false, BarrierSetC2::Optimization), "GC barriers required");
      Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
      Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
      Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, adr_type, rt, bt);
      store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, adr_type, v, rt, bt);
    }
  } else {
    Node* off = kit.MakeConX(type2aelembytes(copy_type) * i);
    Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
    Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
    Node* v = load(bs, &kit.gvn(), ctl, kit.merged_memory(), next_src, atp_src, value_type, copy_type);
    store(bs, &kit.gvn(), ctl, kit.merged_memory(), next_dest, atp_dest, v, value_type, copy_type);
  }
  kit.set_control(ctl);
}

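// Emit the scalarized copy in ascending index order. This path is taken when
// the bases are disjoint or when SrcPos >= DestPos (see
// array_copy_test_overlap), so a forward copy cannot overwrite source
// elements that still need to be read.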
void ArrayCopyNode::array_copy_forward(GraphKit& kit,
                                       bool can_reshape,
                                       const TypeAryPtr* atp_src,
                                       const TypeAryPtr* atp_dest,
                                       Node* adr_src,
                                       Node* base_src,
                                       Node* adr_dest,
                                       Node* base_dest,
                                       BasicType copy_type,
                                       const Type* value_type,
                                       int count) {
  if (!kit.stopped()) {
    // copy forward
    if (count > 0) {
      for (int i = 0; i < count; i++) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

// Emit the scalarized copy in descending index order, for the overlapping
// case where SrcPos < DestPos.
void ArrayCopyNode::array_copy_backward(GraphKit& kit,
                                        bool can_reshape,
                                        const TypeAryPtr* atp_src,
                                        const TypeAryPtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!kit.stopped()) {
    // copy backward
    if (count > 0) {
      for (int i = count-1; i >= 0; i--) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with the new
      // memory and control, and with the input I/O.
      CallProjections* callprojs = extract_projections(true, false);

      if (callprojs->fallthrough_ioproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs->fallthrough_memproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_memproj, mem);
      }
      if (callprojs->fallthrough_catchproj != nullptr) {
        igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Give the current
      // ArrayCopyNode a top() control so that this part of the graph
      // stays consistent but is eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Perform any generic optimizations first
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != nullptr) {
    return result;
  }

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return nullptr;
  }

  // See if this is a small array copy that we can inline as loads/stores.
  // Here we can only do:
  // - an arraycopy whose arguments were all validated before and that doesn't
  //   need card marking
  // - a clone that doesn't need card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return nullptr;
  }

  assert(in(TypeFunc::Control) != nullptr &&
         in(TypeFunc::Memory) != nullptr &&
         in(ArrayCopyNode::Src) != nullptr &&
         in(ArrayCopyNode::Dest) != nullptr &&
         in(ArrayCopyNode::Length) != nullptr &&
         in(ArrayCopyNode::SrcPos) != nullptr &&
         in(ArrayCopyNode::DestPos) != nullptr, "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) {
    return nullptr;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return nullptr;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const Type* dest_type = phase->type(dest);

  if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
    // clone used for load of unknown inline type can't be optimized at
    // this point
    return nullptr;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != nullptr) {
    return (mem == NodeSentinel) ? nullptr : mem;
  }

  Node* adr_src = nullptr;
  Node* base_src = nullptr;
  Node* adr_dest = nullptr;
  Node* base_dest = nullptr;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = nullptr;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == nullptr, "no node can be left behind");
    assert(adr_dest == nullptr, "no node can be left behind");
    return nullptr;
  }

  JVMState* new_jvms = nullptr;
  SafePointNode* new_map = nullptr;
  if (!is_clonebasic()) {
    new_jvms = jvms()->clone_shallow(phase->C);
    new_map = new SafePointNode(req(), new_jvms);
    for (uint i = TypeFunc::FramePtr; i < req(); i++) {
      new_map->init_req(i, in(i));
    }
    new_jvms->set_map(new_map);
  } else {
    new_jvms = new (phase->C) JVMState(0);
    new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
    new_jvms->set_map(new_map);
  }
  new_map->set_control(in(TypeFunc::Control));
  new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
  new_map->set_i_o(in(TypeFunc::I_O));
  phase->record_for_igvn(new_map);

  const TypeAryPtr* atp_src = get_address_type(phase, _src_type, src);
  const TypeAryPtr* atp_dest = get_address_type(phase, _dest_type, dest);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  GraphKit kit(new_jvms, phase);

  SafePointNode* backward_map = nullptr;
  SafePointNode* forward_map = nullptr;
  Node* backward_ctl = phase->C->top();

  array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);

  {
    PreserveJVMState pjvms(&kit);

    array_copy_forward(kit, can_reshape,
                       atp_src, atp_dest,
                       adr_src, base_src, adr_dest, base_dest,
                       copy_type, value_type, count);

    forward_map = kit.stop();
  }

  kit.set_control(backward_ctl);
  array_copy_backward(kit, can_reshape,
                      atp_src, atp_dest,
                      adr_src, base_src, adr_dest, base_dest,
                      copy_type, value_type, count);

  backward_map = kit.stop();

  if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
    assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
    Node* ctl = new RegionNode(3);
    Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
    kit.set_map(forward_map);
    ctl->init_req(1, kit.control());
    mem->init_req(1, kit.reset_memory());
    kit.set_map(backward_map);
    ctl->init_req(2, kit.control());
    mem->init_req(2, kit.reset_memory());
    kit.set_control(phase->transform(ctl));
    kit.set_all_memory(phase->transform(mem));
  } else if (!forward_map->control()->is_top()) {
    kit.set_map(forward_map);
  } else {
    assert(!backward_map->control()->is_top(), "no copy?");
    kit.set_map(backward_map);
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  mem = kit.map()->memory();
  if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
    if (!can_reshape) {
      phase->record_for_igvn(this);
    } else {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return nullptr;
  }

  return mem;
}

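// Returns true if this array copy may modify memory of the type t_oop. If
// escape analysis recorded a known-instance type for the destination, the
// answer reduces to an instance id comparison.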
bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

bool ArrayCopyNode::may_modify_helper(const TypeOopPtr* t_oop, Node* n, PhaseValues* phase, CallNode*& call) {
  if (n != nullptr &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac) {
  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // step over g1 gc barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = nullptr;
  guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != nullptr) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != nullptr && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  } else if (mb->trailing_partial_array_copy()) {
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi in the
// destination array?
// If must_modify is false, return true if the copy could write between
// offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to write
// between offset_lo and offset_hi.
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) {
    return !must_modify;
  }

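  // Compute conservative byte bounds for the region written by the copy from
  // the type bounds of dest_pos and len:
  //   [dest_pos * elemsize + header, (dest_pos + len) * elemsize + header)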
  BasicType ary_elem = ary_t->isa_aryptr()->elem()->array_element_basic_type();
  if (is_reference_type(ary_elem, true)) ary_elem = T_OBJECT;

  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = ary_t->is_flat() ? ary_t->flat_elem_size() : type2aelembytes(ary_elem);

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}

// As an optimization, choose the optimal vector size for a copy length that is
// known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    else if (size_in_bytes > 16 && size_in_bytes <= 32)
      lane_count = 32/type2aelembytes(type);
  }
  return lane_count;
}