16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciObjArrayKlass.hpp"
37 #include "ci/ciTypeArrayKlass.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "utilities/powerOfTwo.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43
44 #ifdef ASSERT
45 #define __ gen()->lir(__FILE__, __LINE__)->
46 #else
47 #define __ gen()->lir()->
48 #endif
49
50 // Item will be loaded into a byte register; the byte-register constraint exists on Intel only, so a plain load_item() is enough here.
51 void LIRItem::load_byte_item() {
52 load_item();
53 }
54
55
86 case longTag: opr = FrameMap::long0_opr; break;
87 case floatTag: opr = FrameMap::fpu0_float_opr; break;
88 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
89
90 case addressTag:
91 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
92 }
93
94 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
95 return opr;
96 }
97
98
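// AArch64 has no dedicated byte registers: allocate an ordinary int register and tag it with the byte_reg flag.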
99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
100 LIR_Opr reg = new_register(T_INT);
101 set_vreg_flag(reg, LIRGenerator::byte_reg);
102 return reg;
103 }
104
105
106 //--------- loading items into registers --------------------------------
107
108
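// Only int/long zero and null can be stored directly as constants; on AArch64 those stores can use zr.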
109 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
110 if (v->type()->as_IntConstant() != NULL) {
111 return v->type()->as_IntConstant()->value() == 0L;
112 } else if (v->type()->as_LongConstant() != NULL) {
113 return v->type()->as_LongConstant()->value() == 0L;
114 } else if (v->type()->as_ObjectConstant() != NULL) {
115 return v->type()->as_ObjectConstant()->value()->is_null_object();
116 } else {
117 return false;
118 }
119 }
120
121 bool LIRGenerator::can_inline_as_constant(Value v) const {
122 // FIXME: Just a guess
123 if (v->type()->as_IntConstant() != NULL) {
124 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
125 } else if (v->type()->as_LongConstant() != NULL) {
297 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
298 LIR_Opr tmp1 = new_register(objectType);
299 LIR_Opr tmp2 = new_register(objectType);
300 LIR_Opr tmp3 = new_register(objectType);
301 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
302 }
303
304 //----------------------------------------------------------------------
305 // visitor functions
306 //----------------------------------------------------------------------
307
308 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
309 assert(x->is_pinned(),"");
310 LIRItem obj(x->obj(), this);
311 obj.load_item();
312
313 set_no_result(x);
314
315 // "lock" stores the address of the monitor stack slot, so this is not an oop
316 LIR_Opr lock = new_register(T_INT);
317
318 CodeEmitInfo* info_for_exception = NULL;
319 if (x->needs_null_check()) {
320 info_for_exception = state_for(x);
321 }
322 // this CodeEmitInfo must not have the xhandlers because here the
323 // object is already locked (xhandlers expect object to be unlocked)
324 CodeEmitInfo* info = state_for(x, x->state(), true);
325 monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
326 x->monitor_no(), info_for_exception, info);
327 }
328
329
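// The object itself is not loaded here; the unlocking code reloads it from the monitor's stack slot, using obj_temp as a temporary.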
330 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
331 assert(x->is_pinned(),"");
332
333 LIRItem obj(x->obj(), this);
334 obj.dont_load_item();
335
336 LIR_Opr lock = new_register(T_INT);
337 LIR_Opr obj_temp = new_register(T_INT);
338 set_no_result(x);
339 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
340 }
341
342 void LIRGenerator::do_NegateOp(NegateOp* x) {
343
344 LIRItem from(x->x(), this);
345 from.load_item();
346 LIR_Opr result = rlock_result(x);
1092
1093 // arguments of lir_convert
1094 LIR_Opr conv_input = input;
1095 LIR_Opr conv_result = result;
1096
1097 __ convert(x->op(), conv_input, conv_result);
1098
1099 assert(result->is_virtual(), "result must be virtual register");
1100 set_result(x, result);
1101 }
1102
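// Fixed temporaries (r10, r11, r4) are used for the inline allocation path; the slow-path runtime stub expects the klass in r3.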
1103 void LIRGenerator::do_NewInstance(NewInstance* x) {
1104 #ifndef PRODUCT
1105 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1106 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1107 }
1108 #endif
1109 CodeEmitInfo* info = state_for(x, x->state());
1110 LIR_Opr reg = result_register_for(x->type());
1111 new_instance(reg, x->klass(), x->is_unresolved(),
1112 FrameMap::r10_oop_opr,
1113 FrameMap::r11_oop_opr,
1114 FrameMap::r4_oop_opr,
1115 LIR_OprFact::illegalOpr,
1116 FrameMap::r3_metadata_opr, info);
1117 LIR_Opr result = rlock_result(x);
1118 __ move(reg, result);
1119 }
1120
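// The length is forced into r19 and the klass into r3 to match the registers the slow-path allocation stub expects.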
1121 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1122 CodeEmitInfo* info = state_for(x, x->state());
1123
1124 LIRItem length(x->length(), this);
1125 length.load_item_force(FrameMap::r19_opr);
1126
1127 LIR_Opr reg = result_register_for(x->type());
1128 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1129 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1130 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1131 LIR_Opr tmp4 = reg;
1132 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1133 LIR_Opr len = length.result();
1134 BasicType elem_type = x->elt_type();
1135
1136 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1137
1138 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1146 LIRItem length(x->length(), this);
1147 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1148 // and therefore provide the state before the parameters have been consumed
1149 CodeEmitInfo* patching_info = NULL;
1150 if (!x->klass()->is_loaded() || PatchALot) {
1151 patching_info = state_for(x, x->state_before());
1152 }
1153
1154 CodeEmitInfo* info = state_for(x, x->state());
1155
1156 LIR_Opr reg = result_register_for(x->type());
1157 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1158 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1159 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1160 LIR_Opr tmp4 = reg;
1161 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1162
1163 length.load_item_force(FrameMap::r19_opr);
1164 LIR_Opr len = length.result();
1165
1166 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1167 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1168 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1169 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1170 }
1171 klass2reg_with_patching(klass_reg, obj, patching_info);
1172 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1173
1174 LIR_Opr result = rlock_result(x);
1175 __ move(reg, result);
1176 }
1177
1178
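// Wrap each dimension size in a LIRItem so all sizes are evaluated before the runtime call.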
1179 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1180 Values* dims = x->dims();
1181 int i = dims->length();
1182 LIRItemList* items = new LIRItemList(i, i, NULL);
1183 while (i-- > 0) {
1184 LIRItem* size = new LIRItem(dims->at(i), this);
1185 items->at_put(i, size);
1186 }
1187
1188 // Evaluate state_for early since it may emit code.
1189 CodeEmitInfo* patching_info = NULL;
1190 if (!x->klass()->is_loaded() || PatchALot) {
1191 patching_info = state_for(x, x->state_before());
1192
1228
1229 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1230 // nothing to do for now
1231 }
1232
1233 void LIRGenerator::do_CheckCast(CheckCast* x) {
1234 LIRItem obj(x->obj(), this);
1235
1236 CodeEmitInfo* patching_info = NULL;
1237 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1238 // must do this before locking the destination register as an oop register,
1239 // and before the obj is loaded (the latter is for deoptimization)
1240 patching_info = state_for(x, x->state_before());
1241 }
1242 obj.load_item();
1243
1244 // info for exceptions
1245 CodeEmitInfo* info_for_exception =
1246 (x->needs_exception_state() ? state_for(x) :
1247 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1248
1249 CodeStub* stub;
1250 if (x->is_incompatible_class_change_check()) {
1251 assert(patching_info == NULL, "can't patch this");
1252 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1253 } else if (x->is_invokespecial_receiver_check()) {
1254 assert(patching_info == NULL, "can't patch this");
1255 stub = new DeoptimizeStub(info_for_exception,
1256 Deoptimization::Reason_class_check,
1257 Deoptimization::Action_none);
1258 } else {
1259 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1260 }
1261 LIR_Opr reg = rlock_result(x);
1262 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1263 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1264 tmp3 = new_register(objectType);
1265 }
1266 __ checkcast(reg, obj.result(), x->klass(),
1267 new_register(objectType), new_register(objectType), tmp3,
1268 x->direct_compare(), info_for_exception, patching_info, stub,
1269 x->profiled_method(), x->profiled_bci());
1270 }
1271
1272 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1273 LIRItem obj(x->obj(), this);
1274
1275 // result and test object may not be in same register
1276 LIR_Opr reg = rlock_result(x);
1277 CodeEmitInfo* patching_info = NULL;
1278 if ((!x->klass()->is_loaded() || PatchALot)) {
1279 // must do this before locking the destination register as an oop register
1280 patching_info = state_for(x, x->state_before());
1281 }
1282 obj.load_item();
1283 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1284 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1285 tmp3 = new_register(objectType);
1286 }
1287 __ instanceof(reg, obj.result(), x->klass(),
1288 new_register(objectType), new_register(objectType), tmp3,
1289 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1327 } else {
1328 yin->load_item();
1329 }
1330 } else {
1331 yin->load_item();
1332 }
1333
1334 set_no_result(x);
1335
1336 LIR_Opr left = xin->result();
1337 LIR_Opr right = yin->result();
1338
1339 // add safepoint before generating condition code so it can be recomputed
1340 if (x->is_safepoint()) {
1341 // increment backedge counter if needed
1342 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1343 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1344 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1345 }
1346
1347 __ cmp(lir_cond(cond), left, right);
1348 // Generate branch profiling. Profiling code doesn't kill flags.
1349 profile_branch(x, cond);
1350 move_to_phi(x->state());
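  // For floating-point compares an unordered result branches to the unordered successor (usux).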
1351 if (x->x()->type()->is_float_kind()) {
1352 __ branch(lir_cond(cond), x->tsux(), x->usux());
1353 } else {
1354 __ branch(lir_cond(cond), x->tsux());
1355 }
1356 assert(x->default_sux() == x->fsux(), "wrong destination above");
1357 __ jump(x->default_sux());
1358 }
1359
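// The current JavaThread is kept in the dedicated rthread register on AArch64.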
1360 LIR_Opr LIRGenerator::getThreadPointer() {
1361 return FrameMap::as_pointer_opr(rthread);
1362 }
1363
1364 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1365
1366 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1367 CodeEmitInfo* info) {
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciInlineKlass.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "ci/ciTypeArrayKlass.hpp"
39 #include "compiler/compilerDefinitions.inline.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "utilities/powerOfTwo.hpp"
43 #include "vmreg_aarch64.inline.hpp"
44
45 #ifdef ASSERT
46 #define __ gen()->lir(__FILE__, __LINE__)->
47 #else
48 #define __ gen()->lir()->
49 #endif
50
51 // Item will be loaded into a byte register; the byte-register constraint exists on Intel only, so a plain load_item() is enough here.
52 void LIRItem::load_byte_item() {
53 load_item();
54 }
55
56
87 case longTag: opr = FrameMap::long0_opr; break;
88 case floatTag: opr = FrameMap::fpu0_float_opr; break;
89 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
90
91 case addressTag:
92 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
93 }
94
95 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
96 return opr;
97 }
98
99
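// AArch64 has no dedicated byte registers: allocate an ordinary int register and tag it with the byte_reg flag.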
100 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
101 LIR_Opr reg = new_register(T_INT);
102 set_vreg_flag(reg, LIRGenerator::byte_reg);
103 return reg;
104 }
105
106
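// The substitutability check (acmp on inline types) needs a single int temporary on AArch64; the second temp stays illegal.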
107 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
108 tmp1 = new_register(T_INT);
109 tmp2 = LIR_OprFact::illegalOpr;
110 }
111
112
113 //--------- loading items into registers --------------------------------
114
115
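// Only int/long zero and null can be stored directly as constants; on AArch64 those stores can use zr.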
116 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
117 if (v->type()->as_IntConstant() != NULL) {
118 return v->type()->as_IntConstant()->value() == 0L;
119 } else if (v->type()->as_LongConstant() != NULL) {
120 return v->type()->as_LongConstant()->value() == 0L;
121 } else if (v->type()->as_ObjectConstant() != NULL) {
122 return v->type()->as_ObjectConstant()->value()->is_null_object();
123 } else {
124 return false;
125 }
126 }
127
128 bool LIRGenerator::can_inline_as_constant(Value v) const {
129 // FIXME: Just a guess
130 if (v->type()->as_IntConstant() != NULL) {
131 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
132 } else if (v->type()->as_LongConstant() != NULL) {
304 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
305 LIR_Opr tmp1 = new_register(objectType);
306 LIR_Opr tmp2 = new_register(objectType);
307 LIR_Opr tmp3 = new_register(objectType);
308 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
309 }
310
311 //----------------------------------------------------------------------
312 // visitor functions
313 //----------------------------------------------------------------------
314
315 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
316 assert(x->is_pinned(),"");
317 LIRItem obj(x->obj(), this);
318 obj.load_item();
319
320 set_no_result(x);
321
322 // "lock" stores the address of the monitor stack slot, so this is not an oop
323 LIR_Opr lock = new_register(T_INT);
324 // Need a scratch register for inline type
325 LIR_Opr scratch = LIR_OprFact::illegalOpr;
326 if (EnableValhalla && x->maybe_inlinetype()) {
327 scratch = new_register(T_INT);
328 }
329
330 CodeEmitInfo* info_for_exception = NULL;
331 if (x->needs_null_check()) {
332 info_for_exception = state_for(x);
333 }
334
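  // Locking on an inline type is illegal: if the receiver might be one, prepare a stub that throws IllegalMonitorStateException.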
335 CodeStub* throw_imse_stub =
336 x->maybe_inlinetype() ?
337 new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
338 NULL;
339
340 // this CodeEmitInfo must not have the xhandlers because here the
341 // object is already locked (xhandlers expect object to be unlocked)
342 CodeEmitInfo* info = state_for(x, x->state(), true);
343 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
344 x->monitor_no(), info_for_exception, info, throw_imse_stub);
345 }
346
347
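// The object itself is not loaded here; the unlocking code reloads it from the monitor's stack slot, using obj_temp as a temporary.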
348 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
349 assert(x->is_pinned(),"");
350
351 LIRItem obj(x->obj(), this);
352 obj.dont_load_item();
353
354 LIR_Opr lock = new_register(T_INT);
355 LIR_Opr obj_temp = new_register(T_INT);
356 set_no_result(x);
357 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
358 }
359
360 void LIRGenerator::do_NegateOp(NegateOp* x) {
361
362 LIRItem from(x->x(), this);
363 from.load_item();
364 LIR_Opr result = rlock_result(x);
1110
1111 // arguments of lir_convert
1112 LIR_Opr conv_input = input;
1113 LIR_Opr conv_result = result;
1114
1115 __ convert(x->op(), conv_input, conv_result);
1116
1117 assert(result->is_virtual(), "result must be virtual register");
1118 set_result(x, result);
1119 }
1120
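// Fixed temporaries (r10, r11, r4) are used for the inline allocation path; the slow-path runtime stub expects the klass in r3.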
1121 void LIRGenerator::do_NewInstance(NewInstance* x) {
1122 #ifndef PRODUCT
1123 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1124 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1125 }
1126 #endif
1127 CodeEmitInfo* info = state_for(x, x->state());
1128 LIR_Opr reg = result_register_for(x->type());
1129 new_instance(reg, x->klass(), x->is_unresolved(),
1130 /* allow_inline */ false,
1131 FrameMap::r10_oop_opr,
1132 FrameMap::r11_oop_opr,
1133 FrameMap::r4_oop_opr,
1134 LIR_OprFact::illegalOpr,
1135 FrameMap::r3_metadata_opr, info);
1136 LIR_Opr result = rlock_result(x);
1137 __ move(reg, result);
1138 }
1139
1140 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1141 // Mirrors do_NewInstance (same code path), but uses state_before so the allocation can be re-executed.
1142 CodeEmitInfo* info = state_for(x, x->state_before());
1143 x->set_to_object_type();
1144 LIR_Opr reg = result_register_for(x->type());
1145 new_instance(reg, x->klass(), false,
1146 /* allow_inline */ true,
1147 FrameMap::r10_oop_opr,
1148 FrameMap::r11_oop_opr,
1149 FrameMap::r4_oop_opr,
1150 LIR_OprFact::illegalOpr,
1151 FrameMap::r3_metadata_opr, info);
1152 LIR_Opr result = rlock_result(x);
1153 __ move(reg, result);
1154
1155 }
1156
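// The length is forced into r19 and the klass into r3 to match the registers the slow-path allocation stub expects.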
1157 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1158 CodeEmitInfo* info = state_for(x, x->state());
1159
1160 LIRItem length(x->length(), this);
1161 length.load_item_force(FrameMap::r19_opr);
1162
1163 LIR_Opr reg = result_register_for(x->type());
1164 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1165 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1166 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1167 LIR_Opr tmp4 = reg;
1168 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1169 LIR_Opr len = length.result();
1170 BasicType elem_type = x->elt_type();
1171
1172 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1173
1174 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1182 LIRItem length(x->length(), this);
1183 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1184 // and therefore provide the state before the parameters have been consumed
1185 CodeEmitInfo* patching_info = NULL;
1186 if (!x->klass()->is_loaded() || PatchALot) {
1187 patching_info = state_for(x, x->state_before());
1188 }
1189
1190 CodeEmitInfo* info = state_for(x, x->state());
1191
1192 LIR_Opr reg = result_register_for(x->type());
1193 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1194 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1195 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1196 LIR_Opr tmp4 = reg;
1197 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1198
1199 length.load_item_force(FrameMap::r19_opr);
1200 LIR_Opr len = length.result();
1201
1202 ciKlass* obj = (ciKlass*) x->exact_type();
1203 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1204 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1205 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1206 }
1207
1208 klass2reg_with_patching(klass_reg, obj, patching_info);
1209 if (x->is_null_free()) {
1210 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_PRIMITIVE_OBJECT, klass_reg, slow_path);
1211 } else {
1212 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1213 }
1214
1215 LIR_Opr result = rlock_result(x);
1216 __ move(reg, result);
1217 }
1218
1219
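// Wrap each dimension size in a LIRItem so all sizes are evaluated before the runtime call.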
1220 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1221 Values* dims = x->dims();
1222 int i = dims->length();
1223 LIRItemList* items = new LIRItemList(i, i, NULL);
1224 while (i-- > 0) {
1225 LIRItem* size = new LIRItem(dims->at(i), this);
1226 items->at_put(i, size);
1227 }
1228
1229 // Evaluate state_for early since it may emit code.
1230 CodeEmitInfo* patching_info = NULL;
1231 if (!x->klass()->is_loaded() || PatchALot) {
1232 patching_info = state_for(x, x->state_before());
1233
1269
1270 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1271 // nothing to do for now
1272 }
1273
1274 void LIRGenerator::do_CheckCast(CheckCast* x) {
1275 LIRItem obj(x->obj(), this);
1276
1277 CodeEmitInfo* patching_info = NULL;
1278 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1279 // must do this before locking the destination register as an oop register,
1280 // and before the obj is loaded (the latter is for deoptimization)
1281 patching_info = state_for(x, x->state_before());
1282 }
1283 obj.load_item();
1284
1285 // info for exceptions
1286 CodeEmitInfo* info_for_exception =
1287 (x->needs_exception_state() ? state_for(x) :
1288 state_for(x, x->state_before(), true /*ignore_xhandler*/));
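  // A cast to a null-free type must reject null explicitly before the klass check.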
1289 if (x->is_null_free()) {
1290 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1291 }
1292
1293 CodeStub* stub;
1294 if (x->is_incompatible_class_change_check()) {
1295 assert(patching_info == NULL, "can't patch this");
1296 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1297 } else if (x->is_invokespecial_receiver_check()) {
1298 assert(patching_info == NULL, "can't patch this");
1299 stub = new DeoptimizeStub(info_for_exception,
1300 Deoptimization::Reason_class_check,
1301 Deoptimization::Action_none);
1302 } else {
1303 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1304 }
1305 LIR_Opr reg = rlock_result(x);
1306 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1307 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1308 tmp3 = new_register(objectType);
1309 }
1310
1311
1312 __ checkcast(reg, obj.result(), x->klass(),
1313 new_register(objectType), new_register(objectType), tmp3,
1314 x->direct_compare(), info_for_exception, patching_info, stub,
1315 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1316
1317 }
1318
1319 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1320 LIRItem obj(x->obj(), this);
1321
1322 // result and test object may not be in same register
1323 LIR_Opr reg = rlock_result(x);
1324 CodeEmitInfo* patching_info = NULL;
1325 if ((!x->klass()->is_loaded() || PatchALot)) {
1326 // must do this before locking the destination register as an oop register
1327 patching_info = state_for(x, x->state_before());
1328 }
1329 obj.load_item();
1330 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1331 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1332 tmp3 = new_register(objectType);
1333 }
1334 __ instanceof(reg, obj.result(), x->klass(),
1335 new_register(objectType), new_register(objectType), tmp3,
1336 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1374 } else {
1375 yin->load_item();
1376 }
1377 } else {
1378 yin->load_item();
1379 }
1380
1381 set_no_result(x);
1382
1383 LIR_Opr left = xin->result();
1384 LIR_Opr right = yin->result();
1385
1386 // add safepoint before generating condition code so it can be recomputed
1387 if (x->is_safepoint()) {
1388 // increment backedge counter if needed
1389 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1390 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1391 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1392 }
1393
1394 if (x->substitutability_check()) {
1395 substitutability_check(x, *xin, *yin);
1396 } else {
1397 __ cmp(lir_cond(cond), left, right);
1398 }
1399
1400 // Generate branch profiling. Profiling code doesn't kill flags.
1401 profile_branch(x, cond);
1402 move_to_phi(x->state());
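  // For floating-point compares an unordered result branches to the unordered successor (usux).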
1403 if (x->x()->type()->is_float_kind()) {
1404 __ branch(lir_cond(cond), x->tsux(), x->usux());
1405 } else {
1406 __ branch(lir_cond(cond), x->tsux());
1407 }
1408 assert(x->default_sux() == x->fsux(), "wrong destination above");
1409 __ jump(x->default_sux());
1410 }
1411
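// The current JavaThread is kept in the dedicated rthread register on AArch64.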
1412 LIR_Opr LIRGenerator::getThreadPointer() {
1413 return FrameMap::as_pointer_opr(rthread);
1414 }
1415
1416 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1417
1418 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1419 CodeEmitInfo* info) {