16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciObjArrayKlass.hpp"
37 #include "ci/ciTypeArrayKlass.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "utilities/powerOfTwo.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43
44 #ifdef ASSERT
45 #define __ gen()->lir(__FILE__, __LINE__)->
46 #else
47 #define __ gen()->lir()->
48 #endif
49
50 // Item will be loaded into a byte register; Intel only
51 void LIRItem::load_byte_item() {
52 load_item();
53 }
54
55
86 case longTag: opr = FrameMap::long0_opr; break;
87 case floatTag: opr = FrameMap::fpu0_float_opr; break;
88 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
89
90 case addressTag:
91 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
92 }
93
94 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
95 return opr;
96 }
97
98
99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
100 LIR_Opr reg = new_register(T_INT);
101 set_vreg_flag(reg, LIRGenerator::byte_reg);
102 return reg;
103 }
104
105
106 //--------- loading items into registers --------------------------------
107
108
109 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
110 if (v->type()->as_IntConstant() != nullptr) {
111 return v->type()->as_IntConstant()->value() == 0L;
112 } else if (v->type()->as_LongConstant() != nullptr) {
113 return v->type()->as_LongConstant()->value() == 0L;
114 } else if (v->type()->as_ObjectConstant() != nullptr) {
115 return v->type()->as_ObjectConstant()->value()->is_null_object();
116 } else {
117 return false;
118 }
119 }
120
121 bool LIRGenerator::can_inline_as_constant(Value v) const {
122 // FIXME: Just a guess
123 if (v->type()->as_IntConstant() != nullptr) {
124 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
125 } else if (v->type()->as_LongConstant() != nullptr) {
303
304 //----------------------------------------------------------------------
305 // visitor functions
306 //----------------------------------------------------------------------
307
// Emit LIR for monitorenter (synchronized-region entry).
// Note the ordering constraints: the object is loaded first, then the
// CodeEmitInfos are captured, and only then is monitor_enter() emitted.
308 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
309 assert(x->is_pinned(),"");
310 LIRItem obj(x->obj(), this);
311 obj.load_item();
312
// monitorenter produces no Java-visible value.
313 set_no_result(x);
314
315 // "lock" stores the address of the monitor stack slot, so this is not an oop
316 LIR_Opr lock = new_register(T_INT);
317 LIR_Opr scratch = new_register(T_INT);
318
// Debug info for an implicit NullPointerException, only when the null
// check has not already been proven unnecessary.
319 CodeEmitInfo* info_for_exception = nullptr;
320 if (x->needs_null_check()) {
321 info_for_exception = state_for(x);
322 }
323 // this CodeEmitInfo must not have the xhandlers because here the
324 // object is already locked (xhandlers expect object to be unlocked)
325 CodeEmitInfo* info = state_for(x, x->state(), true);
326 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
327 x->monitor_no(), info_for_exception, info);
328 }
329
330
// Emit LIR for monitorexit (synchronized-region exit).
331 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
332 assert(x->is_pinned(),"");
333
// The object itself is not loaded into a register here (dont_load_item);
// monitor_exit works via obj_temp and the monitor slot — NOTE(review):
// presumably it reloads the object from the monitor's stack slot; confirm
// against the shared monitor_exit() implementation.
334 LIRItem obj(x->obj(), this);
335 obj.dont_load_item();
336
// Temps for the lock-slot address, the reloaded object, and scratch.
337 LIR_Opr lock = new_register(T_INT);
338 LIR_Opr obj_temp = new_register(T_INT);
339 LIR_Opr scratch = new_register(T_INT);
340 set_no_result(x);
341 monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
342 }
343
344 void LIRGenerator::do_NegateOp(NegateOp* x) {
345
346 LIRItem from(x->x(), this);
347 from.load_item();
1106 value.load_item();
1107 LIR_Opr input = value.result();
1108 LIR_Opr result = rlock(x);
1109
1110 // arguments of lir_convert
1111 LIR_Opr conv_input = input;
1112 LIR_Opr conv_result = result;
1113
1114 __ convert(x->op(), conv_input, conv_result);
1115
1116 assert(result->is_virtual(), "result must be virtual register");
1117 set_result(x, result);
1118 }
1119
// Emit LIR for 'new' of a plain object. Uses the fixed aarch64 register
// protocol shared with the slow-path runtime stub: r10/r11/r4 as temps,
// r3 for the klass metadata.
1120 void LIRGenerator::do_NewInstance(NewInstance* x) {
1121 #ifndef PRODUCT
1122 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1123 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1124 }
1125 #endif
1126 CodeEmitInfo* info = state_for(x, x->state());
// Allocation result is produced in a fixed physical register...
1127 LIR_Opr reg = result_register_for(x->type());
1128 new_instance(reg, x->klass(), x->is_unresolved(),
1129 FrameMap::r10_oop_opr,
1130 FrameMap::r11_oop_opr,
1131 FrameMap::r4_oop_opr,
1132 LIR_OprFact::illegalOpr,
1133 FrameMap::r3_metadata_opr, info);
// ...and then copied into a fresh virtual register for the allocator.
1134 LIR_Opr result = rlock_result(x);
1135 __ move(reg, result);
1136 }
1137
// Emit LIR for 'newarray' (primitive-element array allocation).
1138 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1139 CodeEmitInfo* info = state_for(x, x->state());
1140
// The length is forced into r19 — the register the slow-path stub
// (NewTypeArrayStub) expects it in.
1141 LIRItem length(x->length(), this);
1142 length.load_item_force(FrameMap::r19_opr);
1143
// Fixed registers per the aarch64 allocation protocol; tmp4 aliases the
// result register, klass metadata goes in r3.
1144 LIR_Opr reg = result_register_for(x->type());
1145 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1146 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1147 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1148 LIR_Opr tmp4 = reg;
1149 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1150 LIR_Opr len = length.result();
1151 BasicType elem_type = x->elt_type();
1152
// Primitive array klasses are always loaded, so no patching is needed —
// the klass constant can be materialized directly.
1153 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1154
1155 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1156 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1157
// Copy the fixed result register into a virtual register.
1158 LIR_Opr result = rlock_result(x);
1159 __ move(reg, result);
1160 }
1161
// Emit LIR for 'anewarray' (object-element array allocation). Unlike the
// primitive case, the element class may not be loaded yet, so the klass
// load may need patching.
1162 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1163 LIRItem length(x->length(), this);
1164 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1165 // and therefore provide the state before the parameters have been consumed
1166 CodeEmitInfo* patching_info = nullptr;
1167 if (!x->klass()->is_loaded() || PatchALot) {
1168 patching_info = state_for(x, x->state_before());
1169 }
1170
1171 CodeEmitInfo* info = state_for(x, x->state());
1172
// Same fixed-register protocol as do_NewTypeArray: r10/r11/r5 temps,
// result register doubles as tmp4, klass metadata in r3, length in r19.
1173 LIR_Opr reg = result_register_for(x->type());
1174 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1175 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1176 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1177 LIR_Opr tmp4 = reg;
1178 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1179
1180 length.load_item_force(FrameMap::r19_opr);
1181 LIR_Opr len = length.result();
1182
1183 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1184 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
// Bail out of the compile entirely if the array klass could not be
// created (e.g. due to OOM during class loading).
1185 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1186 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1187 }
1188 klass2reg_with_patching(klass_reg, obj, patching_info);
1189 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1190
1191 LIR_Opr result = rlock_result(x);
1192 __ move(reg, result);
1193 }
1194
1195
1196 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1197 Values* dims = x->dims();
1198 int i = dims->length();
1199 LIRItemList* items = new LIRItemList(i, i, nullptr);
1200 while (i-- > 0) {
1201 LIRItem* size = new LIRItem(dims->at(i), this);
1202 items->at_put(i, size);
1203 }
1204
1205 // Evaluate state_for early since it may emit code.
1206 CodeEmitInfo* patching_info = nullptr;
1207 if (!x->klass()->is_loaded() || PatchALot) {
1208 patching_info = state_for(x, x->state_before());
1209
1245
1246 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1247 // nothing to do for now
1248 }
1249
// Emit LIR for checkcast. Ordering is delicate: patching_info (if needed)
// must be captured before the object is loaded and before any oop register
// is locked, so that deoptimization can re-execute the bytecode.
1250 void LIRGenerator::do_CheckCast(CheckCast* x) {
1251 LIRItem obj(x->obj(), this);
1252
1253 CodeEmitInfo* patching_info = nullptr;
1254 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1255 // must do this before locking the destination register as an oop register,
1256 // and before the obj is loaded (the latter is for deoptimization)
1257 patching_info = state_for(x, x->state_before());
1258 }
1259 obj.load_item();
1260
1261 // info for exceptions
1262 CodeEmitInfo* info_for_exception =
1263 (x->needs_exception_state() ? state_for(x) :
1264 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1265
// Pick the slow-path stub for a failing check: ICCE for incompatible-
// class-change checks, deoptimization for invokespecial receiver checks,
// and ClassCastException (carrying the failing object) otherwise.
1266 CodeStub* stub;
1267 if (x->is_incompatible_class_change_check()) {
1268 assert(patching_info == nullptr, "can't patch this");
1269 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1270 } else if (x->is_invokespecial_receiver_check()) {
1271 assert(patching_info == nullptr, "can't patch this");
1272 stub = new DeoptimizeStub(info_for_exception,
1273 Deoptimization::Reason_class_check,
1274 Deoptimization::Action_none);
1275 } else {
1276 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1277 }
1278 LIR_Opr reg = rlock_result(x);
// An extra temp is required when the klass must be patched or when
// compressed class pointers are in use.
1279 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1280 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1281 tmp3 = new_register(objectType);
1282 }
1283 __ checkcast(reg, obj.result(), x->klass(),
1284 new_register(objectType), new_register(objectType), tmp3,
1285 x->direct_compare(), info_for_exception, patching_info, stub,
1286 x->profiled_method(), x->profiled_bci());
1287 }
1288
1289 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1290 LIRItem obj(x->obj(), this);
1291
1292 // result and test object may not be in same register
1293 LIR_Opr reg = rlock_result(x);
1294 CodeEmitInfo* patching_info = nullptr;
1295 if ((!x->klass()->is_loaded() || PatchALot)) {
1296 // must do this before locking the destination register as an oop register
1297 patching_info = state_for(x, x->state_before());
1298 }
1299 obj.load_item();
1300 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1301 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1302 tmp3 = new_register(objectType);
1303 }
1304 __ instanceof(reg, obj.result(), x->klass(),
1305 new_register(objectType), new_register(objectType), tmp3,
1306 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1344 } else {
1345 yin->load_item();
1346 }
1347 } else {
1348 yin->load_item();
1349 }
1350
1351 set_no_result(x);
1352
1353 LIR_Opr left = xin->result();
1354 LIR_Opr right = yin->result();
1355
1356 // add safepoint before generating condition code so it can be recomputed
1357 if (x->is_safepoint()) {
1358 // increment backedge counter if needed
1359 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1360 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1361 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1362 }
1363
1364 __ cmp(lir_cond(cond), left, right);
1365 // Generate branch profiling. Profiling code doesn't kill flags.
1366 profile_branch(x, cond);
1367 move_to_phi(x->state());
1368 if (x->x()->type()->is_float_kind()) {
1369 __ branch(lir_cond(cond), x->tsux(), x->usux());
1370 } else {
1371 __ branch(lir_cond(cond), x->tsux());
1372 }
1373 assert(x->default_sux() == x->fsux(), "wrong destination above");
1374 __ jump(x->default_sux());
1375 }
1376
1377 LIR_Opr LIRGenerator::getThreadPointer() {
1378 return FrameMap::as_pointer_opr(rthread);
1379 }
1380
1381 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1382
1383 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1384 CodeEmitInfo* info) {
|
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "c1/c1_Compilation.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_Instruction.hpp"
31 #include "c1/c1_LIRAssembler.hpp"
32 #include "c1/c1_LIRGenerator.hpp"
33 #include "c1/c1_Runtime1.hpp"
34 #include "c1/c1_ValueStack.hpp"
35 #include "ci/ciArray.hpp"
36 #include "ci/ciInlineKlass.hpp"
37 #include "ci/ciObjArrayKlass.hpp"
38 #include "ci/ciTypeArrayKlass.hpp"
39 #include "compiler/compilerDefinitions.inline.hpp"
40 #include "runtime/sharedRuntime.hpp"
41 #include "runtime/stubRoutines.hpp"
42 #include "utilities/powerOfTwo.hpp"
43 #include "vmreg_aarch64.inline.hpp"
44
45 #ifdef ASSERT
46 #define __ gen()->lir(__FILE__, __LINE__)->
47 #else
48 #define __ gen()->lir()->
49 #endif
50
51 // Item will be loaded into a byte register; Intel only
52 void LIRItem::load_byte_item() {
53 load_item();
54 }
55
56
87 case longTag: opr = FrameMap::long0_opr; break;
88 case floatTag: opr = FrameMap::fpu0_float_opr; break;
89 case doubleTag: opr = FrameMap::fpu0_double_opr; break;
90
91 case addressTag:
92 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
93 }
94
95 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
96 return opr;
97 }
98
99
100 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
101 LIR_Opr reg = new_register(T_INT);
102 set_vreg_flag(reg, LIRGenerator::byte_reg);
103 return reg;
104 }
105
106
107 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
108 tmp1 = new_register(T_INT);
109 tmp2 = LIR_OprFact::illegalOpr;
110 }
111
112
113 //--------- loading items into registers --------------------------------
114
115
116 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
117 if (v->type()->as_IntConstant() != nullptr) {
118 return v->type()->as_IntConstant()->value() == 0L;
119 } else if (v->type()->as_LongConstant() != nullptr) {
120 return v->type()->as_LongConstant()->value() == 0L;
121 } else if (v->type()->as_ObjectConstant() != nullptr) {
122 return v->type()->as_ObjectConstant()->value()->is_null_object();
123 } else {
124 return false;
125 }
126 }
127
128 bool LIRGenerator::can_inline_as_constant(Value v) const {
129 // FIXME: Just a guess
130 if (v->type()->as_IntConstant() != nullptr) {
131 return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
132 } else if (v->type()->as_LongConstant() != nullptr) {
310
311 //----------------------------------------------------------------------
312 // visitor functions
313 //----------------------------------------------------------------------
314
// Emit LIR for monitorenter (Valhalla variant): in addition to the usual
// locking, an IllegalMonitorStateException path is prepared when the
// receiver might be an inline (value) type, which cannot be synchronized on.
315 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
316 assert(x->is_pinned(),"");
317 LIRItem obj(x->obj(), this);
318 obj.load_item();
319
// monitorenter produces no Java-visible value.
320 set_no_result(x);
321
322 // "lock" stores the address of the monitor stack slot, so this is not an oop
323 LIR_Opr lock = new_register(T_INT);
324 LIR_Opr scratch = new_register(T_INT);
325
// Debug info for an implicit NullPointerException, if still needed.
326 CodeEmitInfo* info_for_exception = nullptr;
327 if (x->needs_null_check()) {
328 info_for_exception = state_for(x);
329 }
330
// Slow-path stub that throws IllegalMonitorStateException if the object
// turns out to be an inline type at runtime; only emitted when the static
// type does not rule that out.
331 CodeStub* throw_imse_stub =
332 x->maybe_inlinetype() ?
333 new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
334 nullptr;
335
336 // this CodeEmitInfo must not have the xhandlers because here the
337 // object is already locked (xhandlers expect object to be unlocked)
338 CodeEmitInfo* info = state_for(x, x->state(), true);
339 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
340 x->monitor_no(), info_for_exception, info, throw_imse_stub);
341 }
342
343
// Emit LIR for monitorexit.
344 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
345 assert(x->is_pinned(),"");
346
// The object is not loaded into a register (dont_load_item); monitor_exit
// works via obj_temp and the monitor slot — NOTE(review): presumably it
// reloads the object from the monitor's stack slot; confirm against the
// shared monitor_exit() implementation.
347 LIRItem obj(x->obj(), this);
348 obj.dont_load_item();
349
// Temps for the lock-slot address, the reloaded object, and scratch.
350 LIR_Opr lock = new_register(T_INT);
351 LIR_Opr obj_temp = new_register(T_INT);
352 LIR_Opr scratch = new_register(T_INT);
353 set_no_result(x);
354 monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
355 }
356
357 void LIRGenerator::do_NegateOp(NegateOp* x) {
358
359 LIRItem from(x->x(), this);
360 from.load_item();
1119 value.load_item();
1120 LIR_Opr input = value.result();
1121 LIR_Opr result = rlock(x);
1122
1123 // arguments of lir_convert
1124 LIR_Opr conv_input = input;
1125 LIR_Opr conv_result = result;
1126
1127 __ convert(x->op(), conv_input, conv_result);
1128
1129 assert(result->is_virtual(), "result must be virtual register");
1130 set_result(x, result);
1131 }
1132
// Emit LIR for 'new' of a plain (non-inline) object, Valhalla variant:
// allow_inline is false here, and the debug state may need to be the
// before-state when the instruction must be re-executed.
1133 void LIRGenerator::do_NewInstance(NewInstance* x) {
1134 #ifndef PRODUCT
1135 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1136 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1137 }
1138 #endif
1139 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
// Fixed-register protocol shared with the slow-path stub: result in the
// platform result register, r10/r11/r4 temps, klass metadata in r3.
1140 LIR_Opr reg = result_register_for(x->type());
1141 new_instance(reg, x->klass(), x->is_unresolved(),
1142 /* allow_inline */ false,
1143 FrameMap::r10_oop_opr,
1144 FrameMap::r11_oop_opr,
1145 FrameMap::r4_oop_opr,
1146 LIR_OprFact::illegalOpr,
1147 FrameMap::r3_metadata_opr, info);
// Copy the fixed result register into a fresh virtual register.
1148 LIR_Opr result = rlock_result(x);
1149 __ move(reg, result);
1150 }
1151
// Emit LIR for 'newarray' (primitive-element array allocation).
1152 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1153 CodeEmitInfo* info = state_for(x, x->state());
1154
// The length is forced into r19 — the register the slow-path stub
// (NewTypeArrayStub) expects it in.
1155 LIRItem length(x->length(), this);
1156 length.load_item_force(FrameMap::r19_opr);
1157
// Fixed registers per the aarch64 allocation protocol; tmp4 aliases the
// result register, klass metadata goes in r3.
1158 LIR_Opr reg = result_register_for(x->type());
1159 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1160 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1161 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1162 LIR_Opr tmp4 = reg;
1163 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1164 LIR_Opr len = length.result();
1165 BasicType elem_type = x->elt_type();
1166
// Primitive array klasses are always loaded, so the klass constant can be
// materialized directly without patching.
1167 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1168
1169 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
// Trailing 'false': primitive arrays are never null-free inline-type
// arrays (compare the x->is_null_free() argument in do_NewObjectArray).
1170 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, false);
1171
1172 LIR_Opr result = rlock_result(x);
1173 __ move(reg, result);
1174 }
1175
// Emit LIR for 'anewarray' (object-element array allocation), Valhalla
// variant: the element class may be unloaded (requiring patching) and the
// array may be a null-free inline-type array.
1176 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1177 LIRItem length(x->length(), this);
1178 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1179 // and therefore provide the state before the parameters have been consumed
1180 CodeEmitInfo* patching_info = nullptr;
1181 if (!x->klass()->is_loaded() || PatchALot) {
1182 patching_info = state_for(x, x->state_before());
1183 }
1184
1185 CodeEmitInfo* info = state_for(x, x->state());
1186
// Same fixed-register protocol as do_NewTypeArray: r10/r11/r5 temps,
// result register doubles as tmp4, klass metadata in r3, length in r19.
1187 LIR_Opr reg = result_register_for(x->type());
1188 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1189 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1190 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1191 LIR_Opr tmp4 = reg;
1192 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1193
1194 length.load_item_force(FrameMap::r19_opr);
1195 LIR_Opr len = length.result();
1196
// The exact array type comes from the instruction itself here (it may be
// a flat/null-free array klass rather than a plain ObjArrayKlass).
1197 ciKlass* obj = (ciKlass*) x->exact_type();
1198 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
// Bail out of the compile entirely if the array klass could not be
// created (e.g. due to OOM during class loading).
1199 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1200 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1201 }
1202
1203 klass2reg_with_patching(klass_reg, obj, patching_info);
1204 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, x->is_null_free());
1205
1206 LIR_Opr result = rlock_result(x);
1207 __ move(reg, result);
1208 }
1209
1210
1211 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1212 Values* dims = x->dims();
1213 int i = dims->length();
1214 LIRItemList* items = new LIRItemList(i, i, nullptr);
1215 while (i-- > 0) {
1216 LIRItem* size = new LIRItem(dims->at(i), this);
1217 items->at_put(i, size);
1218 }
1219
1220 // Evaluate state_for early since it may emit code.
1221 CodeEmitInfo* patching_info = nullptr;
1222 if (!x->klass()->is_loaded() || PatchALot) {
1223 patching_info = state_for(x, x->state_before());
1224
1260
1261 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1262 // nothing to do for now
1263 }
1264
// Emit LIR for checkcast (Valhalla variant): additionally emits an explicit
// null check when casting to a null-free type, and forwards is_null_free()
// to the checkcast LIR op. Ordering is delicate: patching_info must be
// captured before the object is loaded and before any oop register is
// locked, so that deoptimization can re-execute the bytecode.
1265 void LIRGenerator::do_CheckCast(CheckCast* x) {
1266 LIRItem obj(x->obj(), this);
1267
1268 CodeEmitInfo* patching_info = nullptr;
1269 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1270 // must do this before locking the destination register as an oop register,
1271 // and before the obj is loaded (the latter is for deoptimization)
1272 patching_info = state_for(x, x->state_before());
1273 }
1274 obj.load_item();
1275
1276 // info for exceptions
1277 CodeEmitInfo* info_for_exception =
1278 (x->needs_exception_state() ? state_for(x) :
1279 state_for(x, x->state_before(), true /*ignore_xhandler*/));
// A cast to a null-free type rejects null outright; a copy of the info is
// used so the null check has its own debug-info node.
1280 if (x->is_null_free()) {
1281 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1282 }
1283
// Pick the slow-path stub for a failing check: ICCE for incompatible-
// class-change checks, deoptimization for invokespecial receiver checks,
// and ClassCastException (carrying the failing object) otherwise.
1284 CodeStub* stub;
1285 if (x->is_incompatible_class_change_check()) {
1286 assert(patching_info == nullptr, "can't patch this");
1287 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1288 } else if (x->is_invokespecial_receiver_check()) {
1289 assert(patching_info == nullptr, "can't patch this");
1290 stub = new DeoptimizeStub(info_for_exception,
1291 Deoptimization::Reason_class_check,
1292 Deoptimization::Action_none);
1293 } else {
1294 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1295 }
1296 LIR_Opr reg = rlock_result(x);
// An extra temp is required when the klass must be patched or when
// compressed class pointers are in use.
1297 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1298 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1299 tmp3 = new_register(objectType);
1300 }
1301
1302
1303 __ checkcast(reg, obj.result(), x->klass(),
1304 new_register(objectType), new_register(objectType), tmp3,
1305 x->direct_compare(), info_for_exception, patching_info, stub,
1306 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1307
1308 }
1309
1310 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1311 LIRItem obj(x->obj(), this);
1312
1313 // result and test object may not be in same register
1314 LIR_Opr reg = rlock_result(x);
1315 CodeEmitInfo* patching_info = nullptr;
1316 if ((!x->klass()->is_loaded() || PatchALot)) {
1317 // must do this before locking the destination register as an oop register
1318 patching_info = state_for(x, x->state_before());
1319 }
1320 obj.load_item();
1321 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1322 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1323 tmp3 = new_register(objectType);
1324 }
1325 __ instanceof(reg, obj.result(), x->klass(),
1326 new_register(objectType), new_register(objectType), tmp3,
1327 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1365 } else {
1366 yin->load_item();
1367 }
1368 } else {
1369 yin->load_item();
1370 }
1371
1372 set_no_result(x);
1373
1374 LIR_Opr left = xin->result();
1375 LIR_Opr right = yin->result();
1376
1377 // add safepoint before generating condition code so it can be recomputed
1378 if (x->is_safepoint()) {
1379 // increment backedge counter if needed
1380 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1381 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1382 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1383 }
1384
1385 if (x->substitutability_check()) {
1386 substitutability_check(x, *xin, *yin);
1387 } else {
1388 __ cmp(lir_cond(cond), left, right);
1389 }
1390
1391 // Generate branch profiling. Profiling code doesn't kill flags.
1392 profile_branch(x, cond);
1393 move_to_phi(x->state());
1394 if (x->x()->type()->is_float_kind()) {
1395 __ branch(lir_cond(cond), x->tsux(), x->usux());
1396 } else {
1397 __ branch(lir_cond(cond), x->tsux());
1398 }
1399 assert(x->default_sux() == x->fsux(), "wrong destination above");
1400 __ jump(x->default_sux());
1401 }
1402
1403 LIR_Opr LIRGenerator::getThreadPointer() {
1404 return FrameMap::as_pointer_opr(rthread);
1405 }
1406
1407 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1408
1409 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1410 CodeEmitInfo* info) {
|