 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
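// do_field_access parses one of the four field bytecodes:
//   is_get   = true for getfield/getstatic, false for putfield/putstatic
//   is_field = true for the instance-field forms (getfield/putfield),
//              false for the static forms (getstatic/putstatic)
// The actual load or store is emitted by do_get_xxx/do_put_xxx below.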
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }
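  // The mismatch above (e.g. a getfield bytecode that now resolves to a
  // static field because the declaring class was changed) is rare and not
  // worth compiling: trapping with Action_none simply lets the interpreter
  // raise the IncompatibleClassChangeError.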

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_initializer())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }
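  // Rationale for the trap above: C2 may constant-fold the target of a
  // CallSite and inline through it, registering a dependency on that target.
  // A compiled putfield to the target field outside the CallSite constructor
  // (e.g. via MutableCallSite.setTarget) would bypass the dependency
  // machinery, so we deoptimize and let the slower paths handle the update.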

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }
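  // A class initialization barrier is emitted when a static field of a class
  // that might not be initialized yet is accessed and initialization cannot
  // be proven at compile time; it makes sure <clinit> has run (deoptimizing
  // if necessary) before the field access is performed.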

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      push_node(field->layout_type(), con);
      return;
    }
  }
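  // make_constant_from_field (above) folds the load when the field value is
  // known at compile time: static final fields of initialized classes, and
  // final or @Stable instance fields whose holder object is itself a
  // compile-time constant. If no constant can be produced, fall through and
  // emit a regular load.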

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
         "slice of address and input slice don't match");

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
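  // Volatile accesses are tagged MO_SEQ_CST so that the barrier set expands
  // them with the required memory ordering when the access is lowered;
  // plain accesses stay MO_UNORDERED.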

  bool is_obj = is_reference_type(bt);

  if (is_obj) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != nullptr, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  assert(C->get_alias_index(adr_type) == C->get_alias_index(_gvn.type(adr)->isa_ptr()),
         "slice of address and input slice don't match");
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = is_reference_type(bt);

  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (is_obj) {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    } else {
      field_type = Type::BOTTOM;
    }
  }
  access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);

  if (is_field) {
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // If the field is @Stable, we can be in any method, but we only care about
    // constructors at this point.
    //
    // Note the presence of writes to final/@Stable non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    if (field->is_final() || field->is_stable()) {
      if (field->is_final()) {
        set_wrote_final(true);
      }
      if (field->is_stable()) {
        set_wrote_stable(true);
      }
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        set_alloc_with_final_or_stable(obj);
      }
    }
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}
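// In the new_array call above (and in do_newarray below), the trailing
// argument (1) is the number of stack words consumed by the bytecode;
// presumably it lets traps raised inside the allocation (e.g. for a negative
// length) restore the operand stack and re-execute the bytecode in the
// interpreter.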


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;   // count of allocations in the expansion
  int64_t expand_fanout = 1;  // running total fanout
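  // Worked example for the loop below: for new T[2][3][4] the expansion
  // allocates 1 array of length 2, 2 arrays of length 3 and 2*3 = 6 arrays
  // of length 4, so expand_count = 1 + 2 + 6 = 9. The innermost length never
  // enters the count because arrays of the last dimension hold elements, not
  // further arrays.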
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values. Alternatively,
    // we could clamp dim_con like so:
    // dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count += expand_fanout;  // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInstanceKlass.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  ciInstanceKlass* field_holder = field->holder();
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

void Parse::do_get_xxx(Node* obj, ciField* field) {
  obj = cast_to_non_larval(obj);
  BasicType bt = field->layout_type();
  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() && !field->is_flat() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  if (obj->is_InlineType()) {
    InlineTypeNode* vt = obj->as_InlineType();
    Node* value = vt->field_value_by_offset(field->offset_in_bytes(), false);
    if (value->is_InlineType()) {
      value = value->as_InlineType()->adjust_scalarization_depth(this);
    }
    push_node(field->layout_type(), value);
    return;
  }
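  // The block above handles a receiver that is already scalarized in the IR
  // (an InlineTypeNode): the field value is available as one of its inputs,
  // so no memory load is emitted; the sub-value is looked up by field offset
  // and pushed directly.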

  ciType* field_klass = field->type();
  field_klass = improve_abstract_inline_type_klass(field_klass);
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;
  Node* adr = basic_plus_adr(obj, obj, offset);

  Node* ld = nullptr;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_all_zero(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flat()) {
    // Loading from a flat inline type field.
    ciInlineKlass* vk = field->type()->as_inline_klass();
    bool is_immutable = field->is_final() && field->is_strict();
    bool atomic = vk->must_be_atomic() || !field->is_null_free();
    ld = InlineTypeNode::make_from_flat(this, field_klass->as_inline_klass(), obj, adr, atomic, is_immutable, field->is_null_free(), IN_HEAP | MO_UNORDERED);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != nullptr, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free()) {
          type = type->join_speculative(TypePtr::NOTNULL);
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass());
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

// If the field klass is an abstract value klass (for which we do not know the layout, yet), it could have a unique
// concrete sub klass for which we have a fixed layout. This allows us to use InlineTypeNodes instead.
ciType* Parse::improve_abstract_inline_type_klass(ciType* field_klass) {
  Dependencies* dependencies = C->dependencies();
  if (UseUniqueSubclasses && dependencies != nullptr && field_klass->is_instance_klass()) {
    ciInstanceKlass* instance_klass = field_klass->as_instance_klass();
    if (instance_klass->is_loaded() && instance_klass->is_abstract_value_klass()) {
      ciInstanceKlass* sub_klass = instance_klass->unique_concrete_subklass();
      if (sub_klass != nullptr && sub_klass != field_klass) {
        field_klass = sub_klass;
        dependencies->assert_abstract_with_unique_concrete_subtype(instance_klass, sub_klass);
      }
    }
  }
  return field_klass;
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();

  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  if (field->is_null_free()) {
    PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);
    inc_sp(1);
    val = null_check(val);
    if (stopped()) {
      return;
    }
  }
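  // The check above rejects null stores to a null-free field. Since the
  // value has already been popped, the stack is temporarily grown again
  // (inc_sp) and the state is marked for re-execution, so that if the null
  // check traps, the interpreter re-runs the putfield with its original
  // operands and throws the appropriate exception.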

  val = cast_to_non_larval(val);
  Node* adr = basic_plus_adr(obj, obj, offset);

  // We cannot store into a non-larval object, so obj must not be an InlineTypeNode
  assert(!obj->is_InlineType(), "InlineTypeNodes are non-larval value objects");
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty() && (!method()->is_object_constructor() || field->is_flat())) {
    // Storing to a field of an empty, null-free inline type that is already initialized. Ignore.
    return;
  } else if (field->is_flat()) {
    // Storing to a flat inline type field.
    ciInlineKlass* vk = field->type()->as_inline_klass();
    if (!val->is_InlineType()) {
      assert(gvn().type(val) == TypePtr::NULL_PTR, "Unexpected value");
      val = InlineTypeNode::make_null(gvn(), vk);
    }
    inc_sp(1);
    bool is_immutable = field->is_final() && field->is_strict();
    bool atomic = vk->must_be_atomic() || !field->is_null_free();
    val->as_InlineType()->store_flat(this, obj, adr, atomic, is_immutable, field->is_null_free(), IN_HEAP | MO_UNORDERED);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }

    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // If the field is @Stable, we can be in any method, but we only care about
    // constructors at this point.
    //
    // Note the presence of writes to final/@Stable non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    if (field->is_final() || field->is_stable()) {
      if (field->is_final()) {
        set_wrote_final(true);
      }
      if (field->is_stable()) {
        set_wrote_stable(true);
      }
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        set_alloc_with_final_or_stable(obj);
      }
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);

  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != nullptr &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }
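  // The allocation is only compiled if the inline element class is already
  // initialized; otherwise we deoptimize (Reason_uninitialized) and let the
  // interpreter proceed, presumably so that initialization of the element
  // class (and its default value) happens before any array of it exists.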

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
  if (elem_klass != nullptr && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;   // count of allocations in the expansion
  int64_t expand_fanout = 1;  // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values. Alternatively,
    // we could clamp dim_con like so:
    // dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count += expand_fanout;  // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }