 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

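  // Fast path: a getfield whose receiver is a value object that is already
  // scalarized in the current graph can read the field value straight out of
  // the InlineTypeNode; no memory load is needed.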
  if (is_get && is_field && field_holder->is_inlinetype() && peek()->is_InlineType()) {
    InlineTypeNode* vt = peek()->as_InlineType();
    null_check(vt);
    Node* value = vt->field_value_by_offset(field->offset_in_bytes());
    if (value->is_InlineType()) {
      value = value->as_InlineType()->adjust_scalarization_depth(this);
    }
    pop();
    push_node(field->layout_type(), value);
    return;
  }

  if (is_field == field->is_static()) {
    // The interpreter will throw a java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to the call site target field outside of the CallSite constructor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped())  return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detection of a null exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr* tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

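// For orientation, these are the JVM operand-stack shapes the helpers above
// and below rely on (an illustrative sketch, not code from this file):
//
//   getfield :  ..., obj         =>  ..., value
//   putfield :  ..., obj, value  =>  ...
//   getstatic:  ...              =>  ..., value
//   putstatic:  ..., value       =>  ...
//
// This is why do_field_access() peeks at depth field->type()->size() for a
// putfield: the (one- or two-slot) value sits on top of the receiver.
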
void Parse::do_get_xxx(Node* obj, ciField* field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() && !field->is_flat() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a
      // non-null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      push_node(field->layout_type(), con);
      return;
    }
  }
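  // Example (illustrative, not from this file): for a getstatic of
  //   static final int MAX = 100;
  // make_constant_from_field() returns an int constant node, so the parser
  // pushes it directly and no load is emitted. The same applies to @Stable
  // fields once they hold a non-default value.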

  ciType* field_klass = field->type();
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;

  Node* ld = nullptr;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_default(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flat()) {
    // Loading from a flat inline type field.
    ld = InlineTypeNode::make_from_flat(this, field_klass->as_inline_klass(), obj, obj, field->holder(), offset);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != nullptr, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free() && field->is_static()) {
          // Check if the static inline type field is already initialized
          ciInstance* mirror = field->holder()->java_mirror();
          ciObject* val = mirror->field_value(field).as_object();
          if (!val->is_null_object()) {
            type = type->join_speculative(TypePtr::NOTNULL);
          }
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
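    // Volatile loads need sequentially consistent ordering; all other loads
    // are unordered.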
    DecoratorSet decorators = IN_HEAP;
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass(), field->is_null_free());
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

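  // A store to a field of a value object that is still in its larval phase
  // (i.e., we are inside its constructor) does not write to memory: it clones
  // the scalarized InlineTypeNode with the new field value and publishes the
  // clone via replace_in_map().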
  if (obj->is_InlineType()) {
    // TODO 8325106 Factor into own method
    // TODO 8325106 Assert that we only do this in the constructor and align with checks in ::do_call
    // if (_method->is_object_constructor() && _method->holder()->is_inlinetype()) {
    assert(obj->as_InlineType()->is_larval(), "must be larval");

    // TODO 8325106 Assert that holder is null-free
    /*
    int holder_depth = field->type()->size();
    null_check(peek(holder_depth));
    if (stopped()) {
      return;
    }
    */

    if (field->is_null_free()) {
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      val = null_check(val);
      if (stopped()) {
        return;
      }
    }
    if (!val->is_InlineType() && field->type()->is_inlinetype()) {
      // Scalarize the inline type field value
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass(), field->is_null_free());
    } else if (val->is_InlineType() && !field->is_flat()) {
      // The field value needs to be allocated because it can be merged with an oop.
      // Re-execute if buffering triggers deoptimization.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      val = val->as_InlineType()->buffer(this);
    }

    // Clone the inline type node and set the new field value
    InlineTypeNode* new_vt = obj->clone()->as_InlineType();
    new_vt->set_field_value_by_offset(field->offset_in_bytes(), val);
    {
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      int nargs = 1 + field->type()->size();
      inc_sp(nargs);
      new_vt = new_vt->adjust_scalarization_depth(this);
    }

    // TODO 8325106 needed? I think so, because although we are incrementally inlining, we might not incrementally inline this very method
    if ((!_caller->has_method() || C->inlining_incrementally()) && new_vt->is_allocated(&gvn())) {
      // We need to store to the buffer
      // TODO 8325106 looks like G1BarrierSetC2::g1_can_remove_pre_barrier is not strong enough to remove the pre barrier
      // TODO is it really guaranteed that the preval is null?
      new_vt->store(this, new_vt->get_oop(), new_vt->get_oop(), new_vt->bottom_type()->inline_klass(), 0, C2_TIGHTLY_COUPLED_ALLOC | IN_HEAP | MO_UNORDERED, field->offset_in_bytes());

      // Preserve the allocation pointer to create a precedent edge to it in
      // the membar generated on exit from the constructor.
      AllocateNode* alloc = AllocateNode::Ideal_allocation(new_vt->get_oop());
      if (alloc != nullptr) {
        set_alloc_with_final(new_vt->get_oop());
      }
      set_wrote_final(true);
    }

    replace_in_map(obj, _gvn.transform(new_vt));
    return;
  }

  if (field->is_null_free()) {
    PreserveReexecuteState preexecs(this);
    inc_sp(1);
    jvms()->set_should_reexecute(true);
    val = null_check(val);
  }
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty()) {
    // Storing to a field of an empty inline type. Ignore.
    return;
  } else if (field->is_flat()) {
    // Storing to a flat inline type field.
    if (!val->is_InlineType()) {
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass());
    }
    inc_sp(1);
    val->as_InlineType()->store_flat(this, obj, obj, field->holder(), offset);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multiple-copy-atomic (e.g. ppc64), a barrier must
    // be issued in constructors that contain such stores. See do_exits() in
    // parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve the allocation pointer to create a precedent edge to it in
        // the membar generated on exit from the constructor.
        // We can't bind a stable field to its allocation; only record the
        // allocation for a final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap if the class that the array contains is not loaded: we need
  // the loaded class for the rest of the graph, but we must not initialize
  // the container class (see the Java spec)!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);

  // Check that the array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate an uncommon_trap for the unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != nullptr &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
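    // The inline element class has not been initialized yet: trap and
    // reinterpret so that <clinit> runs before the first array is created.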
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // Get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
  if (elem_klass != nullptr && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
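  // For example (illustrative numbers): new T[3][4][n] gives
  // expand_fanout = 3, then 12, and expand_count = 1 + 3 + 12 = 16,
  // i.e. one top-level array, three second-level arrays, and twelve
  // leaf arrays of length n.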
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;   // count of allocations in the expansion
  int64_t expand_fanout = 1;  // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values.  Alternatively,
    // we could clamp dim_con like so:
    //   dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout;  // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }