16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciObjArrayKlass.hpp"
37 #include "ci/ciTypeArrayKlass.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
49 // Item will be loaded into a byte register; Intel only
50 void LIRItem::load_byte_item() {
51 load_item();
52 }
53
54
55 void LIRItem::load_nonconstant() {
85 case longTag: opr = FrameMap::long0_opr; break;
86 case floatTag: opr = FrameMap::fpu0_float_opr; break;
87 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
88
89 case addressTag:
90 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
91 }
92
93 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
94 return opr;
95 }
96
97
98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
99 LIR_Opr reg = new_register(T_INT);
100 set_vreg_flag(reg, LIRGenerator::byte_reg);
101 return reg;
102 }
103
104
105 //--------- loading items into registers --------------------------------
106
107
108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
109 if (v->type()->as_IntConstant() != NULL) {
110 return v->type()->as_IntConstant()->value() == 0L;
111 } else if (v->type()->as_LongConstant() != NULL) {
112 return v->type()->as_LongConstant()->value() == 0L;
113 } else if (v->type()->as_ObjectConstant() != NULL) {
114 return v->type()->as_ObjectConstant()->value()->is_null_object();
115 } else {
116 return false;
117 }
118 }
119
120 bool LIRGenerator::can_inline_as_constant(Value v) const {
121 // FIXME: Just a guess
122 if (v->type()->as_IntConstant() != NULL) {
123 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
124 } else if (v->type()->as_LongConstant() != NULL) {
296 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
297 LIR_Opr tmp1 = new_register(objectType);
298 LIR_Opr tmp2 = new_register(objectType);
299 LIR_Opr tmp3 = new_register(objectType);
300 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
301 }
302
303 //----------------------------------------------------------------------
304 // visitor functions
305 //----------------------------------------------------------------------
306
// monitorenter: lock the receiver object. The object must be in a register;
// the operation produces no result value.
307 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
308 assert(x->is_pinned(),"");
309 LIRItem obj(x->obj(), this);
310 obj.load_item();
311
312 set_no_result(x);
313
314 // "lock" stores the address of the monitor stack slot, so this is not an oop
315 LIR_Opr lock = new_register(T_INT);
316
// Only needed when the receiver may be null; the implicit null check
// deopts/throws with the state at this instruction.
317 CodeEmitInfo* info_for_exception = NULL;
318 if (x->needs_null_check()) {
319 info_for_exception = state_for(x);
320 }
321 // this CodeEmitInfo must not have the xhandlers because here the
322 // object is already locked (xhandlers expect object to be unlocked)
323 CodeEmitInfo* info = state_for(x, x->state(), true);
// No scratch operand is needed here, hence illegalOpr in the fourth slot.
324 monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
325 x->monitor_no(), info_for_exception, info);
326 }
327
328
329 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
330 assert(x->is_pinned(),"");
331
332 LIRItem obj(x->obj(), this);
333 obj.dont_load_item();
334
335 LIR_Opr lock = new_register(T_INT);
336 LIR_Opr obj_temp = new_register(T_INT);
337 set_no_result(x);
338 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
339 }
340
341
342 void LIRGenerator::do_NegateOp(NegateOp* x) {
343
344 LIRItem from(x->x(), this);
345 from.load_item();
1094
1095 // arguments of lir_convert
1096 LIR_Opr conv_input = input;
1097 LIR_Opr conv_result = result;
1098
1099 __ convert(x->op(), conv_input, conv_result);
1100
1101 assert(result->is_virtual(), "result must be virtual register");
1102 set_result(x, result);
1103 }
1104
// new: allocate an instance of x->klass(). The fixed temps (r10/r11/r4) and
// the klass register (r3) are presumably dictated by the runtime allocation
// stub calling convention -- confirm against Runtime1. The allocated oop
// arrives in the standard result register and is copied into a fresh vreg.
1105 void LIRGenerator::do_NewInstance(NewInstance* x) {
1106 #ifndef PRODUCT
1107 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1108 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1109 }
1110 #endif
// Deopt/GC info at this allocation site.
1111 CodeEmitInfo* info = state_for(x, x->state());
1112 LIR_Opr reg = result_register_for(x->type());
1113 new_instance(reg, x->klass(), x->is_unresolved(),
1114 FrameMap::r10_oop_opr,
1115 FrameMap::r11_oop_opr,
1116 FrameMap::r4_oop_opr,
1117 LIR_OprFact::illegalOpr,
1118 FrameMap::r3_metadata_opr, info);
1119 LIR_Opr result = rlock_result(x);
1120 __ move(reg, result);
1121 }
1122
1123 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1124 CodeEmitInfo* info = state_for(x, x->state());
1125
1126 LIRItem length(x->length(), this);
1127 length.load_item_force(FrameMap::r19_opr);
1128
1129 LIR_Opr reg = result_register_for(x->type());
1130 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1131 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1132 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1133 LIR_Opr tmp4 = reg;
1134 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1135 LIR_Opr len = length.result();
1136 BasicType elem_type = x->elt_type();
1137
1138 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1139
1140 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1148 LIRItem length(x->length(), this);
1149 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1150 // and therefore provide the state before the parameters have been consumed
1151 CodeEmitInfo* patching_info = NULL;
1152 if (!x->klass()->is_loaded() || PatchALot) {
1153 patching_info = state_for(x, x->state_before());
1154 }
1155
1156 CodeEmitInfo* info = state_for(x, x->state());
1157
1158 LIR_Opr reg = result_register_for(x->type());
1159 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1160 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1161 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1162 LIR_Opr tmp4 = reg;
1163 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1164
1165 length.load_item_force(FrameMap::r19_opr);
1166 LIR_Opr len = length.result();
1167
1168 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1169 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1170 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1171 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1172 }
1173 klass2reg_with_patching(klass_reg, obj, patching_info);
1174 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1175
1176 LIR_Opr result = rlock_result(x);
1177 __ move(reg, result);
1178 }
1179
1180
1181 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1182 Values* dims = x->dims();
1183 int i = dims->length();
1184 LIRItemList* items = new LIRItemList(i, i, NULL);
1185 while (i-- > 0) {
1186 LIRItem* size = new LIRItem(dims->at(i), this);
1187 items->at_put(i, size);
1188 }
1189
1190 // Evaluate state_for early since it may emit code.
1191 CodeEmitInfo* patching_info = NULL;
1192 if (!x->klass()->is_loaded() || PatchALot) {
1193 patching_info = state_for(x, x->state_before());
1194
1230
// Hook invoked at the start of each basic block; no per-block LIR is emitted.
1231 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1232 // nothing to do for now
1233 }
1234
// checkcast: verify that obj is an instance of x->klass(), throwing (or
// deoptimizing) on failure. Also used for the incompatible-class-change
// and invokespecial receiver checks, which select different slow paths.
1235 void LIRGenerator::do_CheckCast(CheckCast* x) {
1236 LIRItem obj(x->obj(), this);
1237
// Patching is needed when the klass is not yet loaded (or PatchALot forces it).
1238 CodeEmitInfo* patching_info = NULL;
1239 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1240 // must do this before locking the destination register as an oop register,
1241 // and before the obj is loaded (the latter is for deoptimization)
1242 patching_info = state_for(x, x->state_before());
1243 }
1244 obj.load_item();
1245
1246 // info for exceptions
1247 CodeEmitInfo* info_for_exception =
1248 (x->needs_exception_state() ? state_for(x) :
1249 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1250
// Select the slow path: ICCE, deopt (invokespecial receiver), or
// the ordinary ClassCastException stub.
1251 CodeStub* stub;
1252 if (x->is_incompatible_class_change_check()) {
1253 assert(patching_info == NULL, "can't patch this");
1254 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1255 } else if (x->is_invokespecial_receiver_check()) {
1256 assert(patching_info == NULL, "can't patch this");
1257 stub = new DeoptimizeStub(info_for_exception,
1258 Deoptimization::Reason_class_check,
1259 Deoptimization::Action_none);
1260 } else {
1261 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1262 }
1263 LIR_Opr reg = rlock_result(x);
// A third temp is only required when the klass is unloaded or compressed
// class pointers are in use.
1264 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1265 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1266 tmp3 = new_register(objectType);
1267 }
1268 __ checkcast(reg, obj.result(), x->klass(),
1269 new_register(objectType), new_register(objectType), tmp3,
1270 x->direct_compare(), info_for_exception, patching_info, stub,
1271 x->profiled_method(), x->profiled_bci());
1272 }
1273
1274 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1275 LIRItem obj(x->obj(), this);
1276
1277 // result and test object may not be in same register
1278 LIR_Opr reg = rlock_result(x);
1279 CodeEmitInfo* patching_info = NULL;
1280 if ((!x->klass()->is_loaded() || PatchALot)) {
1281 // must do this before locking the destination register as an oop register
1282 patching_info = state_for(x, x->state_before());
1283 }
1284 obj.load_item();
1285 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1286 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1287 tmp3 = new_register(objectType);
1288 }
1289 __ instanceof(reg, obj.result(), x->klass(),
1290 new_register(objectType), new_register(objectType), tmp3,
1291 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1329 } else {
1330 yin->load_item();
1331 }
1332 } else {
1333 yin->load_item();
1334 }
1335
1336 set_no_result(x);
1337
1338 LIR_Opr left = xin->result();
1339 LIR_Opr right = yin->result();
1340
1341 // add safepoint before generating condition code so it can be recomputed
1342 if (x->is_safepoint()) {
1343 // increment backedge counter if needed
1344 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1345 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1346 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1347 }
1348
1349 __ cmp(lir_cond(cond), left, right);
1350 // Generate branch profiling. Profiling code doesn't kill flags.
1351 profile_branch(x, cond);
1352 move_to_phi(x->state());
1353 if (x->x()->type()->is_float_kind()) {
1354 __ branch(lir_cond(cond), x->tsux(), x->usux());
1355 } else {
1356 __ branch(lir_cond(cond), x->tsux());
1357 }
1358 assert(x->default_sux() == x->fsux(), "wrong destination above");
1359 __ jump(x->default_sux());
1360 }
1361
// Returns the current JavaThread as a pointer operand; AArch64 dedicates
// the rthread register to it.
1362 LIR_Opr LIRGenerator::getThreadPointer() {
1363 return FrameMap::as_pointer_opr(rthread);
1364 }
1365
// Debug hook for tracing block entries; not implemented on this platform.
1366 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1367
1368 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1369 CodeEmitInfo* info) {
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciInlineKlass.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "ci/ciTypeArrayKlass.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "utilities/powerOfTwo.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43
44 #ifdef ASSERT
45 #define __ gen()->lir(__FILE__, __LINE__)->
46 #else
47 #define __ gen()->lir()->
48 #endif
49
50 // Item will be loaded into a byte register; Intel only
51 void LIRItem::load_byte_item() {
52 load_item();
53 }
54
55
56 void LIRItem::load_nonconstant() {
86 case longTag: opr = FrameMap::long0_opr; break;
87 case floatTag: opr = FrameMap::fpu0_float_opr; break;
88 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
89
90 case addressTag:
91 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
92 }
93
94 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
95 return opr;
96 }
97
98
99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
100 LIR_Opr reg = new_register(T_INT);
101 set_vreg_flag(reg, LIRGenerator::byte_reg);
102 return reg;
103 }
104
105
106 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
107 tmp1 = new_register(T_INT);
108 tmp2 = LIR_OprFact::illegalOpr;
109 }
110
111
112 //--------- loading items into registers --------------------------------
113
114
115 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
116 if (v->type()->as_IntConstant() != NULL) {
117 return v->type()->as_IntConstant()->value() == 0L;
118 } else if (v->type()->as_LongConstant() != NULL) {
119 return v->type()->as_LongConstant()->value() == 0L;
120 } else if (v->type()->as_ObjectConstant() != NULL) {
121 return v->type()->as_ObjectConstant()->value()->is_null_object();
122 } else {
123 return false;
124 }
125 }
126
127 bool LIRGenerator::can_inline_as_constant(Value v) const {
128 // FIXME: Just a guess
129 if (v->type()->as_IntConstant() != NULL) {
130 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
131 } else if (v->type()->as_LongConstant() != NULL) {
303 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
304 LIR_Opr tmp1 = new_register(objectType);
305 LIR_Opr tmp2 = new_register(objectType);
306 LIR_Opr tmp3 = new_register(objectType);
307 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
308 }
309
310 //----------------------------------------------------------------------
311 // visitor functions
312 //----------------------------------------------------------------------
313
// monitorenter (Valhalla-aware): lock the receiver object. If the receiver
// may be an inline type, locking it is illegal, so an IllegalMonitorState
// slow path is prepared and an extra scratch register is allocated.
314 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
315 assert(x->is_pinned(),"");
316 LIRItem obj(x->obj(), this);
317 obj.load_item();
318
319 set_no_result(x);
320
321 // "lock" stores the address of the monitor stack slot, so this is not an oop
322 LIR_Opr lock = new_register(T_INT);
323 // Need a scratch register for inline type
324 LIR_Opr scratch = LIR_OprFact::illegalOpr;
325 if (EnableValhalla && x->maybe_inlinetype()) {
326 scratch = new_register(T_INT);
327 }
328
// Only needed when the receiver may be null; the implicit null check
// deopts/throws with the state at this instruction.
329 CodeEmitInfo* info_for_exception = NULL;
330 if (x->needs_null_check()) {
331 info_for_exception = state_for(x);
332 }
333
// Slow path that throws IllegalMonitorStateException when the receiver
// turns out to be an inline type at runtime; NULL when that is impossible.
334 CodeStub* throw_imse_stub =
335 x->maybe_inlinetype() ?
336 new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
337 NULL;
338
339 // this CodeEmitInfo must not have the xhandlers because here the
340 // object is already locked (xhandlers expect object to be unlocked)
341 CodeEmitInfo* info = state_for(x, x->state(), true);
342 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
343 x->monitor_no(), info_for_exception, info, throw_imse_stub);
344 }
345
346
347 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
348 assert(x->is_pinned(),"");
349
350 LIRItem obj(x->obj(), this);
351 obj.dont_load_item();
352
353 LIR_Opr lock = new_register(T_INT);
354 LIR_Opr obj_temp = new_register(T_INT);
355 set_no_result(x);
356 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
357 }
358
359
360 void LIRGenerator::do_NegateOp(NegateOp* x) {
361
362 LIRItem from(x->x(), this);
363 from.load_item();
1112
1113 // arguments of lir_convert
1114 LIR_Opr conv_input = input;
1115 LIR_Opr conv_result = result;
1116
1117 __ convert(x->op(), conv_input, conv_result);
1118
1119 assert(result->is_virtual(), "result must be virtual register");
1120 set_result(x, result);
1121 }
1122
// new: allocate an instance of x->klass(). Inline-type allocation is
// explicitly disallowed here (allow_inline=false); see
// do_NewInlineTypeInstance for the inline-type path. Fixed temps
// (r10/r11/r4) and klass register (r3) are presumably dictated by the
// runtime stub calling convention -- confirm against Runtime1.
1123 void LIRGenerator::do_NewInstance(NewInstance* x) {
1124 #ifndef PRODUCT
1125 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1126 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1127 }
1128 #endif
// Deopt/GC info at this allocation site.
1129 CodeEmitInfo* info = state_for(x, x->state());
1130 LIR_Opr reg = result_register_for(x->type());
1131 new_instance(reg, x->klass(), x->is_unresolved(),
1132 /* allow_inline */ false,
1133 FrameMap::r10_oop_opr,
1134 FrameMap::r11_oop_opr,
1135 FrameMap::r4_oop_opr,
1136 LIR_OprFact::illegalOpr,
1137 FrameMap::r3_metadata_opr, info);
1138 LIR_Opr result = rlock_result(x);
1139 __ move(reg, result);
1140 }
1141
// Allocate an inline-type instance. Mirrors do_NewInstance but passes
// allow_inline=true and uses state_before so the bytecode can be
// reexecuted if allocation deoptimizes.
1142 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1143 // Mapping to do_NewInstance (same code) but use state_before for reexecution.
1144 CodeEmitInfo* info = state_for(x, x->state_before());
// From here on the value is treated as an object reference.
1145 x->set_to_object_type();
1146 LIR_Opr reg = result_register_for(x->type());
1147 new_instance(reg, x->klass(), false,
1148 /* allow_inline */ true,
1149 FrameMap::r10_oop_opr,
1150 FrameMap::r11_oop_opr,
1151 FrameMap::r4_oop_opr,
1152 LIR_OprFact::illegalOpr,
1153 FrameMap::r3_metadata_opr, info);
1154 LIR_Opr result = rlock_result(x);
1155 __ move(reg, result);
1156
1157 }
1158
1159 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1160 CodeEmitInfo* info = state_for(x, x->state());
1161
1162 LIRItem length(x->length(), this);
1163 length.load_item_force(FrameMap::r19_opr);
1164
1165 LIR_Opr reg = result_register_for(x->type());
1166 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1167 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1168 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1169 LIR_Opr tmp4 = reg;
1170 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1171 LIR_Opr len = length.result();
1172 BasicType elem_type = x->elt_type();
1173
1174 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1175
1176 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1184 LIRItem length(x->length(), this);
1185 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1186 // and therefore provide the state before the parameters have been consumed
1187 CodeEmitInfo* patching_info = NULL;
1188 if (!x->klass()->is_loaded() || PatchALot) {
1189 patching_info = state_for(x, x->state_before());
1190 }
1191
1192 CodeEmitInfo* info = state_for(x, x->state());
1193
1194 LIR_Opr reg = result_register_for(x->type());
1195 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1196 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1197 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1198 LIR_Opr tmp4 = reg;
1199 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1200
1201 length.load_item_force(FrameMap::r19_opr);
1202 LIR_Opr len = length.result();
1203
1204 ciKlass* obj = (ciKlass*) x->exact_type();
1205 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1206 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1207 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1208 }
1209
1210 klass2reg_with_patching(klass_reg, obj, patching_info);
1211 if (x->is_null_free()) {
1212 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_PRIMITIVE_OBJECT, klass_reg, slow_path);
1213 } else {
1214 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1215 }
1216
1217 LIR_Opr result = rlock_result(x);
1218 __ move(reg, result);
1219 }
1220
1221
1222 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1223 Values* dims = x->dims();
1224 int i = dims->length();
1225 LIRItemList* items = new LIRItemList(i, i, NULL);
1226 while (i-- > 0) {
1227 LIRItem* size = new LIRItem(dims->at(i), this);
1228 items->at_put(i, size);
1229 }
1230
1231 // Evaluate state_for early since it may emit code.
1232 CodeEmitInfo* patching_info = NULL;
1233 if (!x->klass()->is_loaded() || PatchALot) {
1234 patching_info = state_for(x, x->state_before());
1235
1271
// Hook invoked at the start of each basic block; no per-block LIR is emitted.
1272 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1273 // nothing to do for now
1274 }
1275
// checkcast (Valhalla-aware): verify that obj is an instance of x->klass().
// For a null-free (null-restricted) cast, an explicit null check is emitted
// first. Also covers the incompatible-class-change and invokespecial
// receiver checks, which select different slow paths.
1276 void LIRGenerator::do_CheckCast(CheckCast* x) {
1277 LIRItem obj(x->obj(), this);
1278
// Patching is needed when the klass is not yet loaded (or PatchALot forces it).
1279 CodeEmitInfo* patching_info = NULL;
1280 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1281 // must do this before locking the destination register as an oop register,
1282 // and before the obj is loaded (the latter is for deoptimization)
1283 patching_info = state_for(x, x->state_before());
1284 }
1285 obj.load_item();
1286
1287 // info for exceptions
1288 CodeEmitInfo* info_for_exception =
1289 (x->needs_exception_state() ? state_for(x) :
1290 state_for(x, x->state_before(), true /*ignore_xhandler*/));
// Null-free casts reject null outright, with a copy of the exception info.
1291 if (x->is_null_free()) {
1292 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1293 }
1294
// Select the slow path: ICCE, deopt (invokespecial receiver), or the
// ordinary ClassCastException stub.
1295 CodeStub* stub;
1296 if (x->is_incompatible_class_change_check()) {
1297 assert(patching_info == NULL, "can't patch this");
1298 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1299 } else if (x->is_invokespecial_receiver_check()) {
1300 assert(patching_info == NULL, "can't patch this");
1301 stub = new DeoptimizeStub(info_for_exception,
1302 Deoptimization::Reason_class_check,
1303 Deoptimization::Action_none);
1304 } else {
1305 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1306 }
1307 LIR_Opr reg = rlock_result(x);
// A third temp is only required when the klass is unloaded or compressed
// class pointers are in use.
1308 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1309 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1310 tmp3 = new_register(objectType);
1311 }
1312
1313
1314 __ checkcast(reg, obj.result(), x->klass(),
1315 new_register(objectType), new_register(objectType), tmp3,
1316 x->direct_compare(), info_for_exception, patching_info, stub,
1317 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1318
1319 }
1320
1321 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1322 LIRItem obj(x->obj(), this);
1323
1324 // result and test object may not be in same register
1325 LIR_Opr reg = rlock_result(x);
1326 CodeEmitInfo* patching_info = NULL;
1327 if ((!x->klass()->is_loaded() || PatchALot)) {
1328 // must do this before locking the destination register as an oop register
1329 patching_info = state_for(x, x->state_before());
1330 }
1331 obj.load_item();
1332 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1333 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1334 tmp3 = new_register(objectType);
1335 }
1336 __ instanceof(reg, obj.result(), x->klass(),
1337 new_register(objectType), new_register(objectType), tmp3,
1338 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1376 } else {
1377 yin->load_item();
1378 }
1379 } else {
1380 yin->load_item();
1381 }
1382
1383 set_no_result(x);
1384
1385 LIR_Opr left = xin->result();
1386 LIR_Opr right = yin->result();
1387
1388 // add safepoint before generating condition code so it can be recomputed
1389 if (x->is_safepoint()) {
1390 // increment backedge counter if needed
1391 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1392 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1393 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1394 }
1395
1396 if (x->substitutability_check()) {
1397 substitutability_check(x, *xin, *yin);
1398 } else {
1399 __ cmp(lir_cond(cond), left, right);
1400 }
1401
1402 // Generate branch profiling. Profiling code doesn't kill flags.
1403 profile_branch(x, cond);
1404 move_to_phi(x->state());
1405 if (x->x()->type()->is_float_kind()) {
1406 __ branch(lir_cond(cond), x->tsux(), x->usux());
1407 } else {
1408 __ branch(lir_cond(cond), x->tsux());
1409 }
1410 assert(x->default_sux() == x->fsux(), "wrong destination above");
1411 __ jump(x->default_sux());
1412 }
1413
// Returns the current JavaThread as a pointer operand; AArch64 dedicates
// the rthread register to it.
1414 LIR_Opr LIRGenerator::getThreadPointer() {
1415 return FrameMap::as_pointer_opr(rthread);
1416 }
1417
// Debug hook for tracing block entries; not implemented on this platform.
1418 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1419
1420 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1421 CodeEmitInfo* info) {
|