/*
 * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_ARRAYCOPYNODE_HPP
#define SHARE_OPTO_ARRAYCOPYNODE_HPP

#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/callnode.hpp"

class GraphKit;

class ArrayCopyNode : public CallNode {
private:

  // What kind of arraycopy variant is this?
  enum {
    None,           // not set yet
    ArrayCopy,      // System.arraycopy()
    CloneInst,      // A clone of instances
    CloneArray,     // A clone of arrays that don't require a barrier
                    // - depends on GC - some need to treat oop arrays separately
    CloneOopArray,  // An oop array clone that requires GC barriers
    CopyOf,         // Arrays.copyOf()
    CopyOfRange     // Arrays.copyOfRange()
  } _kind;

#ifndef PRODUCT
  static const char* _kind_names[CopyOfRange+1];
#endif
  // Is the alloc obtained with
  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
  // (the arraycopy immediately follows the allocation)?
  // We cache the result of LibraryCallKit::tightly_coupled_allocation
  // here because it's much easier to find whether there's a tightly
  // coupled allocation at parse time than at macro expansion time. At
  // macro expansion time, for every use of the allocation node we
  // would need to figure out whether it happens after the arraycopy (and
  // can be ignored) or between the allocation and the arraycopy. At
  // parse time, it's straightforward because whatever happens after
  // the arraycopy is not parsed yet so doesn't exist when
  // LibraryCallKit::tightly_coupled_allocation() is called.
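  //
  // Illustrative example (hypothetical Java code, variable names are
  // placeholders):
  //   int[] dest = new int[len];               // AllocateArrayNode
  //   System.arraycopy(src, 0, dest, 0, len);  // ArrayCopyNode follows immediately
  // Here the allocation is tightly coupled to the arraycopy: nothing uses
  // 'dest' between the allocation and the copy, so macro expansion may treat
  // the destination as freshly allocated.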
  bool _alloc_tightly_coupled;
  bool _has_negative_length_guard;

  bool _arguments_validated;

  static const TypeFunc* arraycopy_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[Src]       = TypeInstPtr::BOTTOM;
    fields[SrcPos]    = TypeInt::INT;
    fields[Dest]      = TypeInstPtr::BOTTOM;
    fields[DestPos]   = TypeInt::INT;
    fields[Length]    = TypeInt::INT;
    fields[SrcLen]    = TypeInt::INT;
    fields[DestLen]   = TypeInt::INT;
    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
    fields[DestKlass] = TypeKlassPtr::BOTTOM;
    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard);

  intptr_t get_length_if_constant(PhaseGVN *phase) const;
  int get_count(PhaseGVN *phase) const;
  static const TypeAryPtr* get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n);

  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
  bool prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                          Node*& adr_src, Node*& base_src, Node*& adr_dest, Node*& base_dest,
                          BasicType& copy_type, const Type*& value_type, bool& disjoint_bases);
  void array_copy_test_overlap(GraphKit& kit,
                               bool disjoint_bases, int count,
                               Node*& backward_ctl);
  void array_copy_forward(GraphKit& kit, bool can_reshape,
                          const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest,
                          Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
                          BasicType copy_type, const Type* value_type, int count);
  void array_copy_backward(GraphKit& kit, bool can_reshape,
                           const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest,
                           Node* adr_src, Node* base_src, Node* adr_dest, Node* base_dest,
                           BasicType copy_type, const Type* value_type, int count);
  bool finish_transform(PhaseGVN *phase, bool can_reshape,
                        Node* ctl, Node *mem);
  void copy(GraphKit& kit, const TypeAryPtr* atp_src, const TypeAryPtr* atp_dest, int i,
            Node* base_src, Node* base_dest, Node* adr_src, Node* adr_dest,
            BasicType copy_type, const Type* value_type);

  static bool may_modify_helper(const TypeOopPtr* t_oop, Node* n, PhaseValues* phase, CallNode*& call);
public:
  static Node* load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, const Type *type, BasicType bt);
private:
  void store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt);

public:

  enum {
    Src = TypeFunc::Parms,
    SrcPos,
    Dest,
    DestPos,
    Length,
    SrcLen,
    DestLen,
    SrcKlass,
    DestKlass,
    ParmLimit
  };

  // Results from escape analysis for non-escaping inputs
  const TypeOopPtr* _src_type;
  const TypeOopPtr* _dest_type;

  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
                             Node* src, Node* src_offset,
                             Node* dest, Node* dest_offset,
                             Node* length,
                             bool alloc_tightly_coupled,
                             bool has_negative_length_guard,
                             Node* src_klass = nullptr, Node* dest_klass = nullptr,
                             Node* src_length = nullptr, Node* dest_length = nullptr);

  void connect_outputs(GraphKit* kit, bool deoptimize_on_exception = false);

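  // Rough usage sketch (illustrative only; 'kit', 'alloc', 'validated' and
  // 'negative_length_guard_generated' are placeholder names, see
  // LibraryCallKit::inline_arraycopy() for an actual call site):
  //   ArrayCopyNode* ac = ArrayCopyNode::make(kit, true, src, src_offset,
  //                                           dest, dest_offset, length,
  //                                           alloc != nullptr,
  //                                           negative_length_guard_generated);
  //   ac->set_arraycopy(validated);
  //   Node* n = kit->gvn().transform(ac);
  //   if (n == ac) {
  //     ac->connect_outputs(kit);
  //   }
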
bet set"); return _kind == ArrayCopy; } 153 bool is_arraycopy_validated() const { assert(_kind != None, "should bet set"); return _kind == ArrayCopy && _arguments_validated; } 154 bool is_clone_inst() const { assert(_kind != None, "should bet set"); return _kind == CloneInst; } 155 // is_clone_array - true for all arrays when using GCs that has no barriers 156 bool is_clone_array() const { assert(_kind != None, "should bet set"); return _kind == CloneArray; } 157 // is_clone_oop_array is used when oop arrays need GC barriers 158 bool is_clone_oop_array() const { assert(_kind != None, "should bet set"); return _kind == CloneOopArray; } 159 // is_clonebasic - is true for any type of clone that doesn't need a writebarrier. 160 bool is_clonebasic() const { assert(_kind != None, "should bet set"); return _kind == CloneInst || _kind == CloneArray; } 161 bool is_copyof() const { assert(_kind != None, "should bet set"); return _kind == CopyOf; } 162 bool is_copyof_validated() const { assert(_kind != None, "should bet set"); return _kind == CopyOf && _arguments_validated; } 163 bool is_copyofrange() const { assert(_kind != None, "should bet set"); return _kind == CopyOfRange; } 164 bool is_copyofrange_validated() const { assert(_kind != None, "should bet set"); return _kind == CopyOfRange && _arguments_validated; } 165 166 void set_arraycopy(bool validated) { assert(_kind == None, "shouldn't bet set yet"); _kind = ArrayCopy; _arguments_validated = validated; } 167 void set_clone_inst() { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneInst; } 168 void set_clone_array() { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneArray; } 169 void set_clone_oop_array() { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneOopArray; } 170 void set_copyof(bool validated) { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOf; _arguments_validated = validated; } 171 void set_copyofrange(bool validated) { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOfRange; _arguments_validated = validated; } 172 173 virtual int Opcode() const; 174 virtual uint size_of() const; // Size is bigger 175 virtual bool guaranteed_safepoint() { return false; } 176 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 177 178 virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase); 179 180 bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; } 181 182 bool has_negative_length_guard() const { return _has_negative_length_guard; } 183 184 static bool may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac); 185 186 static int get_partial_inline_vector_lane_count(BasicType type, int const_len); 187 188 bool modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const; 189 190 #ifndef PRODUCT 191 virtual void dump_spec(outputStream *st) const; 192 virtual void dump_compact_spec(outputStream* st) const; 193 #endif 194 }; 195 #endif // SHARE_OPTO_ARRAYCOPYNODE_HPP