 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

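  // Scalarized receiver fast path: a getfield whose receiver is already an
  // InlineTypeNode can read the field value straight out of the node (after a
  // null check) instead of emitting a memory load.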
  if (is_get && is_field && field_holder->is_inlinetype() && peek()->is_InlineType()) {
    InlineTypeNode* vt = peek()->as_InlineType();
    null_check(vt);
    Node* value = vt->field_value_by_offset(field->offset_in_bytes());
    if (value->is_InlineType()) {
      value = value->as_InlineType()->adjust_scalarization_depth(this);
    }
    pop();
    push_node(field->layout_type(), value);
    return;
  }

  if (is_field == field->is_static()) {
    // The interpreter will throw java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  nullptr, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Was a null exception detected at compile time?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr* tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}
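
// Illustrative mapping from the field-access bytecodes to the flags above
// (the dispatch lives in Parse::do_one_bytecode()):
//   getfield  -> do_field_access(/*is_get*/ true,  /*is_field*/ true)
//   getstatic -> do_field_access(/*is_get*/ true,  /*is_field*/ false)
//   putfield  -> do_field_access(/*is_get*/ false, /*is_field*/ true)
//   putstatic -> do_field_access(/*is_get*/ false, /*is_field*/ false)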

void Parse::do_get_xxx(Node* obj, ciField* field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() && !field->is_flat() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a
      // non-null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != nullptr) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  field_klass = improve_abstract_inline_type_klass(field_klass);
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;

  Node* ld = nullptr;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_default(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flat()) {
    // Loading from a flat inline type field.
    ld = InlineTypeNode::make_from_flat(this, field_klass->as_inline_klass(), obj, obj, field->holder(), offset);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != nullptr, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free() && field->is_static()) {
          // Check if static inline type field is already initialized
          ciInstance* mirror = field->holder()->java_mirror();
          ciObject* val = mirror->field_value(field).as_object();
          if (!val->is_null_object()) {
            type = type->join_speculative(TypePtr::NOTNULL);
          }
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass(), field->is_null_free());
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != nullptr) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

// If the field klass is an abstract value klass (for which we do not know the layout yet), it could have a unique
// concrete subklass with a fixed layout. If so, use that subklass so the field can be handled as InlineTypeNodes.
ciType* Parse::improve_abstract_inline_type_klass(ciType* field_klass) {
  Dependencies* dependencies = C->dependencies();
  if (UseUniqueSubclasses && dependencies != nullptr && field_klass->is_instance_klass()) {
    ciInstanceKlass* instance_klass = field_klass->as_instance_klass();
    if (instance_klass->is_loaded() && instance_klass->is_abstract_value_klass()) {
      ciInstanceKlass* sub_klass = instance_klass->unique_concrete_subklass();
      if (sub_klass != nullptr && sub_klass != field_klass) {
        field_klass = sub_klass;
        dependencies->assert_abstract_with_unique_concrete_subtype(instance_klass, sub_klass);
      }
    }
  }
  return field_klass;
}
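
// Example (illustrative, hypothetical classes): if a field is declared with an
// abstract value class type A and B is A's unique loaded concrete value
// subklass, the field type is narrowed to B, guarded by a dependency that
// triggers deoptimization if another concrete subklass is ever loaded.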

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  if (obj->is_InlineType()) {
    // TODO 8325106 Factor into own method
    // TODO 8325106 Assert that we only do this in the constructor and align with checks in ::do_call
    //if (_method->is_object_constructor() && _method->holder()->is_inlinetype()) {
    assert(obj->as_InlineType()->is_larval(), "must be larval");

    // TODO 8325106 Assert that holder is null-free
    /*
    int holder_depth = field->type()->size();
    null_check(peek(holder_depth));
    if (stopped()) {
      return;
    }
    */

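    // A null-free field can never hold null, so a null value must not be
    // written: null_check below may trap, and should_reexecute + inc_sp(1)
    // arrange for the putfield to be re-executed with val back on the stack.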
    if (field->is_null_free()) {
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      inc_sp(1);
      val = null_check(val);
      if (stopped()) {
        return;
      }
    }
    if (!val->is_InlineType() && field->type()->is_inlinetype()) {
      // Scalarize inline type field value
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass(), field->is_null_free());
    } else if (val->is_InlineType() && !field->is_flat()) {
      // Field value needs to be allocated because it can be merged with an oop.
      // Re-execute if buffering triggers deoptimization.
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      inc_sp(1);
      val = val->as_InlineType()->buffer(this);
    }

    // Clone the inline type node and set the new field value
    InlineTypeNode* new_vt = obj->as_InlineType()->clone_if_required(&_gvn, _map);
    new_vt->set_field_value_by_offset(field->offset_in_bytes(), val);
    {
      PreserveReexecuteState preexecs(this);
      jvms()->set_should_reexecute(true);
      inc_sp(1);
      new_vt = new_vt->adjust_scalarization_depth(this);
    }

    // TODO 8325106 Double check and explain these checks
    if ((!_caller->has_method() || C->inlining_incrementally() || _caller->method()->is_object_constructor()) && new_vt->is_allocated(&gvn())) {
      assert(new_vt->as_InlineType()->is_allocated(&gvn()), "must be buffered");
      // We need to store to the buffer
      // TODO 8325106 looks like G1BarrierSetC2::g1_can_remove_pre_barrier is not strong enough to remove the pre barrier
      // TODO is it really guaranteed that the preval is null?
      new_vt->store(this, new_vt->get_oop(), new_vt->get_oop(), new_vt->bottom_type()->inline_klass(), 0, C2_TIGHTLY_COUPLED_ALLOC | IN_HEAP | MO_UNORDERED, field->offset_in_bytes());

      // Preserve allocation ptr to create precedent edge to it in membar
      // generated on exit from constructor.
      AllocateNode* alloc = AllocateNode::Ideal_allocation(new_vt->get_oop());
      if (alloc != nullptr) {
        set_alloc_with_final(new_vt->get_oop());
      }
      set_wrote_final(true);
    }

    replace_in_map(obj, _gvn.transform(new_vt));
    return;
  }

  if (field->is_null_free()) {
    PreserveReexecuteState preexecs(this);
    inc_sp(1);
    jvms()->set_should_reexecute(true);
    val = null_check(val);
    if (stopped()) {
      return;
    }
  }
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty()) {
    // Storing to a field of an empty inline type. Ignore.
    return;
  } else if (field->is_flat()) {
    // Storing to a flat inline type field.
    if (!val->is_InlineType()) {
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass());
    }
    inc_sp(1);
    val->as_InlineType()->store_flat(this, obj, obj, field->holder(), offset);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multi-copy atomic (e.g. ppc64), a barrier must be
    // issued in constructors containing such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
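    // Example (illustrative): for
    //   class C { final int f; C(int v) { f = v; } }
    // the store to f sets wrote_final, and do_exits() then emits a release
    // barrier so the write cannot float past publication of the new C object.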
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj) != nullptr) {
        // Preserve the allocation ptr so a precedence edge to it can be added
        // to the membar generated on exit from the constructor. A @Stable field
        // can't be bound to its allocation; only record it for final fields.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the element class of the array is not loaded:
  // we need the loaded class for the rest of the graph, and we must not
  // initialize the container class (see the Java spec)!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);

  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != nullptr &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass, Type::trust_interfaces);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = nullptr;  // terminating null for make_runtime_call
  int j;
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
  if (elem_klass != nullptr && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  nullptr);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
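  // Example (illustrative): for 'new T[2][3]' the loop below computes
  // expand_fanout = 2 and expand_count = 1 + 2 = 3 (one top-level array plus
  // two row arrays); if that count stays within expand_limit, the whole
  // allocation is expanded into repeated 1-d allocations.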
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int64_t expand_count = 1;   // count of allocations in the expansion
  int64_t expand_fanout = 1;  // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    // To prevent overflow, we use 64-bit values. Alternatively,
    // we could clamp dim_con like so:
    //   dim_con = MIN2(dim_con, expand_limit);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout;  // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }