 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
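// do_field_access parses one of the getfield, putfield, getstatic, and
// putstatic bytecodes: is_get distinguishes loads from stores, is_field
// distinguishes instance accesses (getfield/putfield) from static ones
// (getstatic/putstatic).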
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_initializer())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Was a null exception detected at compile time?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop(); // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop(); // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

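// Load the value of an instance or static field and push it on the stack.
// For static fields, 'obj' is a constant holding the class mirror.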
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type* type;

  bool must_assert_null = false;

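  // The decorator set tells the barrier set how to perform this access:
  // volatile accesses require sequentially consistent ordering (MO_SEQ_CST),
  // ordinary field loads are unordered.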
  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = is_reference_type(bt);

  if (is_obj) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

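// Pop a value from the expression stack and store it to the field. For
// instance fields the receiver is still on the stack beneath the value and
// is popped by the caller afterwards.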
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = is_reference_type(bt);

  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (is_obj) {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    } else {
      field_type = Type::BOTTOM;
    }
  }
  access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multi-copy-atomic (e.g. ppc64), a barrier should be
    // issued in constructors that contain such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve the allocation pointer so we can create a precedence edge to
        // it in the membar generated on exit from the constructor.
        // Can't bind a stable field to its allocation; only record the
        // allocation for a final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================
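// anewarray: allocate a one-dimensional array of the resolved reference type.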
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the element class is not loaded: we need the loaded
  // class for the rest of the graph, but must not initialize the container
  // class (see the Java spec).
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL; // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;  // count of allocations in the expansion
  int expand_fanout = 1; // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
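  // expand_count == 0 means the expansion was abandoned; in that case the
  // allocation is left to the runtime multianewarray call below.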

  // Can use multianewarray instead of [a]newarray if only one dimension,
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

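  // Fast path for reading a field of a scalarized inline type: the value's
  // fields are already available as individual nodes, so extract the one at
  // the requested offset instead of emitting a memory load.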
  if (is_field && field_holder->is_inlinetype() && peek()->is_InlineTypeBase()) {
    assert(is_get, "inline type field store not supported");
    InlineTypeBaseNode* vt = peek()->as_InlineTypeBase();
    null_check(vt);
    pop();
    Node* value = vt->field_value_by_offset(field->offset());
    push_node(field->layout_type(), value);
    return;
  }

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_constructor())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped()) return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Was a null exception detected at compile time?
    if (stopped()) return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop(); // pop receiver before getting
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop(); // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

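// Load a field value and push it on the stack. In addition to ordinary
// fields this handles inline type fields, which may be flattened (read
// field by field) or referenced through an oop.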
void Parse::do_get_xxx(Node* obj, ciField* field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() && !field->is_flattened() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  int offset = field->offset_in_bytes();
  bool must_assert_null = false;

  Node* ld = NULL;
  if (field->is_null_free() && field_klass->as_inline_klass()->is_empty()) {
    // Loading from a field of an empty inline type. Just return the default instance.
    ld = InlineTypeNode::make_default(_gvn, field_klass->as_inline_klass());
  } else if (field->is_flattened()) {
    // Loading from a flattened inline type field.
    ld = InlineTypeNode::make_from_flattened(this, field_klass->as_inline_klass(), obj, obj, field->holder(), offset);
  } else {
    // Build the resultant type of the load
    const Type* type;
    if (is_reference_type(bt)) {
      if (!field_klass->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
        must_assert_null = true;
      } else if (field->is_static_constant()) {
        // This can happen if the constant oop is non-perm.
        ciObject* con = field->constant_value().as_object();
        // Do not "join" in the previous type; it doesn't add value,
        // and may yield a vacuous result if the field is of interface type.
        if (con->is_null_object()) {
          type = TypePtr::NULL_PTR;
        } else {
          type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
        }
        assert(type != NULL, "field singleton type must be consistent");
      } else {
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        if (field->is_null_free() && field->is_static()) {
          // Check if static inline type field is already initialized
          ciInstance* mirror = field->holder()->java_mirror();
          ciObject* val = mirror->field_value(field).as_object();
          if (!val->is_null_object()) {
            type = type->join_speculative(TypePtr::NOTNULL);
          }
        }
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= field->is_volatile() ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (field_klass->is_inlinetype()) {
      // Load a non-flattened inline type from memory
      ld = InlineTypeNode::make_from_oop(this, ld, field_klass->as_inline_klass(), field->is_null_free());
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field_klass));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

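// Pop a value and store it to the field. Flattened inline type fields are
// written field by field into the holder; all other fields take a single store.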
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  int offset = field->offset_in_bytes();
  BasicType bt = field->layout_type();
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  assert(!field->is_null_free() || val->is_InlineType() || !gvn().type(val)->maybe_null(), "Null store to inline type field");
  if (field->is_null_free() && field->type()->as_inline_klass()->is_empty()) {
    // Storing to a field of an empty inline type. Ignore.
    return;
  } else if (field->is_flattened()) {
    // Storing to a flattened inline type field.
    if (!val->is_InlineType()) {
      val = InlineTypeNode::make_from_oop(this, val, field->type()->as_inline_klass());
    }
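    // Keep the value on the expression stack (inc_sp/dec_sp below) while the
    // store is emitted, so the JVM state stays consistent if the flattened
    // store reaches a safepoint.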
    inc_sp(1);
    val->as_InlineTypeBase()->store_flattened(this, obj, obj, field->holder(), offset);
    dec_sp(1);
  } else {
    // Store the value.
    const Type* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      if (is_reference_type(bt)) {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      } else {
        field_type = Type::BOTTOM;
      }
    }
    Node* adr = basic_plus_adr(obj, obj, offset);
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    inc_sp(1);
    access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
    dec_sp(1);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multi-copy-atomic (e.g. ppc64), a barrier should be
    // issued in constructors that contain such stores. See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve the allocation pointer so we can create a precedence edge to
        // it in the membar generated on exit from the constructor.
        // Can't bind a stable field to its allocation; only record the
        // allocation for a final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);
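  // In the Valhalla bytecode format, a Q-descriptor in the array type
  // signature marks a null-free (inline type) element.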
  bool null_free = iter().has_Q_signature();

  // Uncommon trap when the element class is not loaded: we need the loaded
  // class for the rest of the graph, but must not initialize the container
  // class (see the Java spec).
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass, null_free);

  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != NULL &&
             array_klass->element_klass()->is_inlinetype() &&
             !array_klass->element_klass()->as_inline_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  NULL);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL; // terminating null for make_runtime_call
  int j;
  ciKlass* elem_klass = array_klass;
  for (j = ndimensions-1; j >= 0; j--) {
    length[j] = pop();
    elem_klass = elem_klass->as_array_klass()->element_klass();
  }
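  // If the element type is an uninitialized inline class, trap: restore the
  // popped lengths first (inc_sp) so the interpreter can re-execute the
  // bytecode once the class has been initialized.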
  if (elem_klass != NULL && elem_klass->is_inlinetype() && !elem_klass->as_inline_klass()->is_initialized()) {
    inc_sp(ndimensions);
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  NULL);
    return;
  }

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;  // count of allocations in the expansion
  int expand_fanout = 1; // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
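  // expand_count == 0 means the expansion was abandoned; in that case the
  // allocation is left to the runtime multianewarray call below.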

  // Can use multianewarray instead of [a]newarray if only one dimension,