/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), NULL, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O, kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

void ArrayCopyNode::connect_outputs(GraphKit* kit) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

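// Returns the length input as a constant: an element count for array
// copies, or the raw copy length passed by a basic clone. Returns -1 if
// the length is not a compile-time constant.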
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

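// Returns the number of elements to copy (or, for a basic clone of an
// instance, the number of non-static fields to copy), or -1 if that
// count is not known at compile time.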
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  if (is_clonebasic()) {
    Node* src = in(ArrayCopyNode::Src);
    const Type* src_type = phase->type(src);

    if (src_type == Type::TOP) {
      return -1;
    }

    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is easier to simply
      // skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != NULL, "not an array or instance?");
      // clone passes a length as a rounded number of longs. If we're
      // cloning an array we'll do it element by element. If the
      // length input to ArrayCopyNode is constant, the length of the input
      // array must be too.

      assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
             (ValueArrayFlatten && ary_src->elem()->make_oopptr() != NULL && ary_src->elem()->make_oopptr()->can_be_value_type()) ||
             phase->is_IterGVN() || phase->C->inlining_incrementally(), "inconsistent");

      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

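// Load and store helpers: memory accesses are issued through the GC
// barrier set (BarrierSetC2) so that any barriers the collector needs
// are emitted along with the raw load/store.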
Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}


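// Expand a basic clone of an instance into per-field loads and stores.
// Returns NULL if this ArrayCopyNode is not such a clone, NodeSentinel
// if the expansion could not be completed, and the resulting memory
// state on success.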
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(src);

  assert(src->is_AddP(), "should be base + off");
  assert(dest->is_AddP(), "should be base + off");
  Node* base_src = src->in(AddPNode::Base);
  Node* base_dest = dest->in(AddPNode::Base);

  MergeMemNode* mem = MergeMemNode::make(in_mem);

  const TypeInstPtr* inst_src = src_type->isa_instptr();

  if (inst_src == NULL) {
    return NULL;
  }

  if (!inst_src->klass_is_exact()) {
    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
    phase->C->dependencies()->assert_leaf_type(ik);
  }

  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    int fieldidx = phase->C->alias_type(field)->index();
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset());
    Node* next_src = phase->transform(new AddPNode(base_src, base_src, off));
    Node* next_dest = phase->transform(new AddPNode(base_dest, base_dest, off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

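// Compute the base and element addresses of the source and destination,
// the element type being copied and whether the two bases are known to
// be disjoint. Returns false if the copy cannot be expanded into loads
// and stores (unknown array types, mismatched element types, or a GC
// that requires barriers for this copy).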
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);

    // newly allocated object is guaranteed to not overlap with source object
    disjoint_bases = is_alloc_tightly_coupled();

    if (ary_src  == NULL || ary_src->klass()  == NULL ||
        ary_dest == NULL || ary_dest->klass() == NULL) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem  = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem  == T_ARRAY ||
        (src_elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) {
      src_elem  = T_OBJECT;
    }
    if (dest_elem == T_ARRAY ||
        (dest_elem == T_VALUETYPE && ary_dest->klass()->is_obj_array_klass())) {
      dest_elem = T_OBJECT;
    }

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    base_src = src;
    base_dest = dest;

    uint shift  = exact_log2(type2aelembytes(dest_elem));
    if (dest_elem == T_VALUETYPE) {
      ciValueArrayKlass* vak = ary_src->klass()->as_value_array_klass();
      shift = vak->log2_element_size();
    }
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    adr_src = src;
    adr_dest = dest;

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (src_offset->is_top() || dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(header)));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(header)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));

    copy_type = dest_elem;
  } else {
    assert(ary_src != NULL, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;
    assert(src->is_AddP(), "should be base + off");
    assert(dest->is_AddP(), "should be base + off");
    adr_src = src;
    base_src = src->in(AddPNode::Base);
    adr_dest = dest;
    base_dest = dest->in(AddPNode::Base);

    assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");

    if (ary_src->elem()->make_oopptr() != NULL &&
        ary_src->elem()->make_oopptr()->can_be_value_type()) {
      return false;
    }

    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    if (elem == T_ARRAY ||
        (elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass())) {
      elem = T_OBJECT;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, BarrierSetC2::Optimization)) {
      return false;
    }

    int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }

    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypeAryPtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
  const Type* at = phase->type(n);
  assert(at != Type::TOP, "unexpected type");
  const TypeAryPtr* atp = at->is_aryptr();
  // adjust atp to be the correct array element address type
  atp = atp->add_offset(Type::OffsetBot)->is_aryptr();
  return atp;
}

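// If the source and destination may overlap and more than one element is
// copied, emit a runtime check on the offsets: the copy proceeds forward
// when src_offset >= dest_offset and backward (on backward_ctl) otherwise.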
void ArrayCopyNode::array_copy_test_overlap(GraphKit& kit, bool disjoint_bases, int count, Node*& backward_ctl) {
  Node* ctl = kit.control();
  if (!disjoint_bases && count > 1) {
    PhaseGVN& gvn = kit.gvn();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != NULL && dest_offset != NULL, "should be");
    Node* cmp = gvn.transform(new CmpINode(src_offset, dest_offset));
    Node *bol = gvn.transform(new BoolNode(cmp, BoolTest::lt));
    IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    gvn.transform(iff);

    kit.set_control(gvn.transform(new IfFalseNode(iff)));
    backward_ctl = gvn.transform(new IfTrueNode(iff));
  }
}

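// Emit the load and store for element i of the copy. Flattened value type
// elements are copied one field at a time; object stores go through the
// access API so that GC barriers are emitted when needed.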
void ArrayCopyNode::copy(GraphKit& kit,
                         const TypeAryPtr* atp_src,
                         const TypeAryPtr* atp_dest,
                         int i,
                         Node* base_src,
                         Node* base_dest,
                         Node* adr_src,
                         Node* adr_dest,
                         BasicType copy_type,
                         const Type* value_type) {
  if (copy_type == T_VALUETYPE) {
    ciValueArrayKlass* vak = atp_src->klass()->as_value_array_klass();
    ciValueKlass* vk = vak->element_klass()->as_value_klass();
    for (int j = 0; j < vk->nof_nonstatic_fields(); j++) {
      ciField* field = vk->nonstatic_field_at(j);
      int off_in_vt = field->offset() - vk->first_field_offset();
      Node* off  = kit.MakeConX(off_in_vt + i * vak->element_byte_size());
      ciType* ft = field->type();
      BasicType bt = type2field[ft->basic_type()];
      assert(!field->is_flattened(), "flattened field encountered");
      if (bt == T_VALUETYPE) {
        bt = T_OBJECT;
      }
      const Type* rt = Type::get_const_type(ft);
      const TypePtr* adr_type = atp_src->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
      Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
      Node* v = kit.make_load(kit.control(), next_src, rt, bt, adr_type, MemNode::unordered);

      Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
      if (is_java_primitive(bt)) {
        kit.store_to_memory(kit.control(), next_dest, v, bt, adr_type, MemNode::unordered);
      } else {
        const TypeOopPtr* val_type = Type::get_const_type(ft)->is_oopptr();
        kit.access_store_at(base_dest, next_dest, adr_type, v,
                            val_type, bt, StoreNode::release_if_reference(T_OBJECT));
      }
    }
  } else {
    Node* off  = kit.MakeConX(type2aelembytes(copy_type) * i);
    Node* next_src = kit.gvn().transform(new AddPNode(base_src, adr_src, off));
    Node* v = kit.make_load(kit.control(), next_src, value_type, copy_type, atp_src, MemNode::unordered);
    Node* next_dest = kit.gvn().transform(new AddPNode(base_dest, adr_dest, off));
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (copy_type == T_OBJECT && (bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization))) {
      kit.access_store_at(base_dest, next_dest, atp_dest, v,
                          value_type->make_ptr()->is_oopptr(), copy_type,
                          StoreNode::release_if_reference(T_OBJECT));
    } else {
      kit.store_to_memory(kit.control(), next_dest, v, copy_type, atp_dest, MemNode::unordered);
    }
  }
}


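// Emit the copy in ascending element order. Used when the copied regions
// cannot overlap destructively (disjoint bases or src_offset >= dest_offset).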
void ArrayCopyNode::array_copy_forward(GraphKit& kit,
                                       bool can_reshape,
                                       const TypeAryPtr* atp_src,
                                       const TypeAryPtr* atp_dest,
                                       Node* adr_src,
                                       Node* base_src,
                                       Node* adr_dest,
                                       Node* base_dest,
                                       BasicType copy_type,
                                       const Type* value_type,
                                       int count) {
  if (!kit.stopped()) {
    // copy forward
    if (count > 0) {
      for (int i = 0; i < count; i++) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

void ArrayCopyNode::array_copy_backward(GraphKit& kit,
                                        bool can_reshape,
                                        const TypeAryPtr* atp_src,
                                        const TypeAryPtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!kit.stopped()) {
    // copy backward
    if (count > 0) {
      for (int i = count-1; i >= 0; i--) {
        copy(kit, atp_src, atp_dest, i, base_src, base_dest, adr_src, adr_dest, copy_type, value_type);
      }
    } else if (can_reshape) {
      PhaseGVN& gvn = kit.gvn();
      assert(gvn.is_IterGVN(), "");
      gvn.record_for_igvn(adr_src);
      gvn.record_for_igvn(adr_dest);
    }
  }
}

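// Wire the expanded copy back into the graph: the fallthrough projections
// of the ArrayCopyNode are replaced by the new control and memory state.
// Returns false if that is not possible, e.g. at parse time when Ideal
// cannot return new control.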
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // replace fallthrough projections of the ArrayCopyNode by the
      // new memory, control and the input IO.
      CallProjections* callprojs = extract_projections(true, false);

      if (callprojs->fallthrough_ioproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs->fallthrough_memproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_memproj, mem);
      }
      if (callprojs->fallthrough_catchproj != NULL) {
        igvn->replace_node(callprojs->fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
#ifdef ASSERT
      Node* src = in(ArrayCopyNode::Src);
      const Type* src_type = phase->type(src);
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      BasicType elem = ary_src != NULL ? ary_src->klass()->as_array_klass()->element_type()->basic_type() : T_CONFLICT;
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      assert(!is_clonebasic() || bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Optimization) ||
             (ary_src != NULL && elem == T_VALUETYPE && ary_src->klass()->is_obj_array_klass()), "added control for clone?");
#endif
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}


Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Perform any generic optimizations first
  Node* result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL) {
    return result;
  }

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return NULL;
  }

  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return NULL;
  }

  assert(in(TypeFunc::Control) != NULL &&
         in(TypeFunc::Memory) != NULL &&
         in(ArrayCopyNode::Src) != NULL &&
         in(ArrayCopyNode::Dest) != NULL &&
         in(ArrayCopyNode::Length) != NULL &&
         ((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
          is_clonebasic()), "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
    return NULL;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const Type* dest_type = phase->type(dest);

  if (src_type->isa_aryptr() && dest_type->isa_instptr()) {
    // clone used for load of unknown value type can't be optimized at
    // this point
    return NULL;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != NULL) {
    return (mem == NodeSentinel) ? NULL : mem;
  }

  Node* adr_src = NULL;
  Node* base_src = NULL;
  Node* adr_dest = NULL;
  Node* base_dest = NULL;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = NULL;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    return NULL;
  }

  JVMState* new_jvms = NULL;
  SafePointNode* new_map = NULL;
  if (!is_clonebasic()) {
    new_jvms = jvms()->clone_shallow(phase->C);
    new_map = new SafePointNode(req(), new_jvms);
    for (uint i = TypeFunc::FramePtr; i < req(); i++) {
      new_map->init_req(i, in(i));
    }
    new_jvms->set_map(new_map);
  } else {
    new_jvms = new (phase->C) JVMState(0);
    new_map = new SafePointNode(TypeFunc::Parms, new_jvms);
    new_jvms->set_map(new_map);
  }
  new_map->set_control(in(TypeFunc::Control));
  new_map->set_memory(MergeMemNode::make(in(TypeFunc::Memory)));
  new_map->set_i_o(in(TypeFunc::I_O));
  phase->record_for_igvn(new_map);

  const TypeAryPtr* atp_src = get_address_type(phase, src);
  const TypeAryPtr* atp_dest = get_address_type(phase, dest);
  uint alias_idx_src = phase->C->get_alias_index(atp_src);
  uint alias_idx_dest = phase->C->get_alias_index(atp_dest);

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

  GraphKit kit(new_jvms, phase);

  SafePointNode* backward_map = NULL;
  SafePointNode* forward_map = NULL;
  Node* backward_ctl = phase->C->top();

  array_copy_test_overlap(kit, disjoint_bases, count, backward_ctl);

  {
    PreserveJVMState pjvms(&kit);

    array_copy_forward(kit, can_reshape,
                       atp_src, atp_dest,
                       adr_src, base_src, adr_dest, base_dest,
                       copy_type, value_type, count);

    forward_map = kit.stop();
  }

  kit.set_control(backward_ctl);
  array_copy_backward(kit, can_reshape,
                      atp_src, atp_dest,
                      adr_src, base_src, adr_dest, base_dest,
                      copy_type, value_type, count);

  backward_map = kit.stop();

  if (!forward_map->control()->is_top() && !backward_map->control()->is_top()) {
    assert(forward_map->i_o() == backward_map->i_o(), "need a phi on IO?");
    Node* ctl = new RegionNode(3);
    Node* mem = new PhiNode(ctl, Type::MEMORY, TypePtr::BOTTOM);
    kit.set_map(forward_map);
    ctl->init_req(1, kit.control());
    mem->init_req(1, kit.reset_memory());
    kit.set_map(backward_map);
    ctl->init_req(2, kit.control());
    mem->init_req(2, kit.reset_memory());
    kit.set_control(phase->transform(ctl));
    kit.set_all_memory(phase->transform(mem));
  } else if (!forward_map->control()->is_top()) {
    kit.set_map(forward_map);
  } else {
    assert(!backward_map->control()->is_top(), "no copy?");
    kit.set_map(backward_map);
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  mem = kit.map()->memory();
  if (!finish_transform(phase, can_reshape, kit.control(), mem)) {
    if (!can_reshape) {
      phase->record_for_igvn(this);
    }
    return NULL;
  }

  return mem;
}

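// Returns true if this array copy may modify memory of the given type.
// Uses the src/dest types recorded by escape analysis, when available,
// to give a more precise answer.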
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

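// Helper for the MemBar variant of may_modify() below: returns true if n is
// a call that may modify t_oop, i.e. an ArrayCopy node or a call to an
// arraycopy stub, and sets call to that node.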
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
  if (n != NULL &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {

  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // step over g1 gc barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = NULL;
  guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array?
// If must_modify is false, return true if the copy could write
// between offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi.
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt *dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt *len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
    return !must_modify;
  }

  ciArrayKlass* klass = ary_t->klass()->as_array_klass();
  BasicType ary_elem = klass->element_type()->basic_type();
  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);
  if (klass->is_value_array_klass()) {
    elemsize = klass->as_value_array_klass()->element_byte_size();
  }

  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}