15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArray.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "compiler/compilerDefinitions.inline.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_aarch64.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
49 // Item will be loaded into a byte register; Intel only
// On AArch64 every general-purpose register is byte-addressable, so the x86
// byte-register constraint does not apply and an ordinary load suffices.
50 void LIRItem::load_byte_item() {
51 load_item();
52 }
53
54
306
307 //----------------------------------------------------------------------
308 // visitor functions
309 //----------------------------------------------------------------------
310
// Emit LIR for monitorenter: load the object, allocate non-oop temps for the
// monitor stack-slot address ("lock") and a scratch register, then delegate
// to monitor_enter(). The bytecode produces no value, hence set_no_result().
311 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
312 assert(x->is_pinned(),"");
313 LIRItem obj(x->obj(), this);
314 obj.load_item();
315 
316 set_no_result(x);
317 
318 // "lock" stores the address of the monitor stack slot, so this is not an oop
319 LIR_Opr lock = new_register(T_INT);
320 LIR_Opr scratch = new_register(T_INT);
321 
// Debug state used only for the implicit null check of the monitored object;
// left null when the null check has been proven unnecessary.
322 CodeEmitInfo* info_for_exception = nullptr;
323 if (x->needs_null_check()) {
324 info_for_exception = state_for(x);
325 }
326 // this CodeEmitInfo must not have the xhandlers because here the
327 // object is already locked (xhandlers expect object to be unlocked)
328 CodeEmitInfo* info = state_for(x, x->state(), true);
329 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
330 x->monitor_no(), info_for_exception, info);
331 }
332
333
// Emit LIR for monitorexit. The object operand is deliberately NOT loaded
// (dont_load_item): monitor_exit receives a fresh temp (obj_temp) instead of
// obj.result() — presumably it recovers the object from the monitor slot;
// confirm against the shared monitor_exit implementation.
334 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
335 assert(x->is_pinned(),"");
336 
337 LIRItem obj(x->obj(), this);
338 obj.dont_load_item();
339 
// All temps are non-oop scratch registers, like in do_MonitorEnter.
340 LIR_Opr lock = new_register(T_INT);
341 LIR_Opr obj_temp = new_register(T_INT);
342 LIR_Opr scratch = new_register(T_INT);
343 set_no_result(x);
344 monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
345 }
346
347 void LIRGenerator::do_NegateOp(NegateOp* x) {
348
349 LIRItem from(x->x(), this);
350 from.load_item();
1110 value.load_item();
1111 LIR_Opr input = value.result();
1112 LIR_Opr result = rlock(x);
1113
1114 // arguments of lir_convert
1115 LIR_Opr conv_input = input;
1116 LIR_Opr conv_result = result;
1117
1118 __ convert(x->op(), conv_input, conv_result);
1119
1120 assert(result->is_virtual(), "result must be virtual register");
1121 set_result(x, result);
1122 }
1123
// Emit LIR for 'new': allocate an instance via new_instance() using a fixed
// set of AArch64 registers (r10/r11/r4 as oop temps, r3 for the klass
// metadata) — presumably dictated by the runtime allocation stub's calling
// convention; confirm against the C1 runtime stubs for this platform.
1124 void LIRGenerator::do_NewInstance(NewInstance* x) {
1125 #ifndef PRODUCT
// Diagnostic-only trace when allocating an instance of a not-yet-loaded class.
1126 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1127 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1128 }
1129 #endif
1130 CodeEmitInfo* info = state_for(x, x->state());
1131 LIR_Opr reg = result_register_for(x->type());
1132 new_instance(reg, x->klass(), x->is_unresolved(),
1133 FrameMap::r10_oop_opr,
1134 FrameMap::r11_oop_opr,
1135 FrameMap::r4_oop_opr,
1136 LIR_OprFact::illegalOpr,
1137 FrameMap::r3_metadata_opr, info);
// Move the fixed result register into a virtual register for the consumer.
1138 LIR_Opr result = rlock_result(x);
1139 __ move(reg, result);
1140 }
1141
1142 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1143 CodeEmitInfo* info = nullptr;
1144 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1145 info = state_for(x, x->state_before());
1146 info->set_force_reexecute();
1147 } else {
1148 info = state_for(x, x->state());
1149 }
1150
1151 LIRItem length(x->length(), this);
1152 length.load_item_force(FrameMap::r19_opr);
1153
1154 LIR_Opr reg = result_register_for(x->type());
1155 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1156 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1157 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1173 LIRItem length(x->length(), this);
1174 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1175 // and therefore provide the state before the parameters have been consumed
1176 CodeEmitInfo* patching_info = nullptr;
1177 if (!x->klass()->is_loaded() || PatchALot) {
1178 patching_info = state_for(x, x->state_before());
1179 }
1180
1181 CodeEmitInfo* info = state_for(x, x->state());
1182
1183 LIR_Opr reg = result_register_for(x->type());
1184 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1185 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1186 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1187 LIR_Opr tmp4 = reg;
1188 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1189
1190 length.load_item_force(FrameMap::r19_opr);
1191 LIR_Opr len = length.result();
1192
1193 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1194 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1195 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1196 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1197 }
1198 klass2reg_with_patching(klass_reg, obj, patching_info);
1199 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1200
1201 LIR_Opr result = rlock_result(x);
1202 __ move(reg, result);
1203 }
1204
1205
1206 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1207 Values* dims = x->dims();
1208 int i = dims->length();
1209 LIRItemList* items = new LIRItemList(i, i, nullptr);
1210 while (i-- > 0) {
1211 LIRItem* size = new LIRItem(dims->at(i), this);
1212 items->at_put(i, size);
1213 }
1214
1215 // Evaluate state_for early since it may emit code.
1216 CodeEmitInfo* patching_info = nullptr;
1217 if (!x->klass()->is_loaded() || PatchALot) {
1218 patching_info = state_for(x, x->state_before());
1219
1274 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1275
1276 CodeStub* stub;
1277 if (x->is_incompatible_class_change_check()) {
1278 assert(patching_info == nullptr, "can't patch this");
1279 stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1280 } else if (x->is_invokespecial_receiver_check()) {
1281 assert(patching_info == nullptr, "can't patch this");
1282 stub = new DeoptimizeStub(info_for_exception,
1283 Deoptimization::Reason_class_check,
1284 Deoptimization::Action_none);
1285 } else {
1286 stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception);
1287 }
1288 LIR_Opr reg = rlock_result(x);
1289 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1290 tmp3 = new_register(objectType);
1291 __ checkcast(reg, obj.result(), x->klass(),
1292 new_register(objectType), new_register(objectType), tmp3,
1293 x->direct_compare(), info_for_exception, patching_info, stub,
1294 x->profiled_method(), x->profiled_bci());
1295 }
1296
// Emit LIR for 'instanceof': lock the (int) result register, set up patching
// info if the tested class may need runtime resolution, then emit the
// lir_instanceof op with three fresh oop temp registers.
1297 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1298 LIRItem obj(x->obj(), this);
1299 
1300 // result and test object may not be in same register
1301 LIR_Opr reg = rlock_result(x);
1302 CodeEmitInfo* patching_info = nullptr;
1303 if ((!x->klass()->is_loaded() || PatchALot)) {
1304 // must do this before locking the destination register as an oop register
1305 patching_info = state_for(x, x->state_before());
1306 }
1307 obj.load_item();
// tmp3 is always allocated here; the illegalOpr initialization is only a
// placeholder mirroring platforms where the third temp is optional.
1308 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1309 tmp3 = new_register(objectType);
1310 __ instanceof(reg, obj.result(), x->klass(),
1311 new_register(objectType), new_register(objectType), tmp3,
1312 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1313 }
1314
1355 } else {
1356 yin->load_item();
1357 }
1358 } else {
1359 yin->load_item();
1360 }
1361
1362 set_no_result(x);
1363
1364 LIR_Opr left = xin->result();
1365 LIR_Opr right = yin->result();
1366
1367 // add safepoint before generating condition code so it can be recomputed
1368 if (x->is_safepoint()) {
1369 // increment backedge counter if needed
1370 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1371 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1372 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1373 }
1374
1375 __ cmp(lir_cond(cond), left, right);
1376 // Generate branch profiling. Profiling code doesn't kill flags.
1377 profile_branch(x, cond);
1378 move_to_phi(x->state());
1379 if (x->x()->type()->is_float_kind()) {
1380 __ branch(lir_cond(cond), x->tsux(), x->usux());
1381 } else {
1382 __ branch(lir_cond(cond), x->tsux());
1383 }
1384 assert(x->default_sux() == x->fsux(), "wrong destination above");
1385 __ jump(x->default_sux());
1386 }
1387
// Return the current-thread register (rthread) wrapped as a pointer operand.
1388 LIR_Opr LIRGenerator::getThreadPointer() {
1389 return FrameMap::as_pointer_opr(rthread);
1390 }
1391
// Block-entry tracing is not implemented on this platform.
1392 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1393
1394 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1395 CodeEmitInfo* info) {
|
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArray.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciObjArrayKlass.hpp"
37 #include "ci/ciTypeArrayKlass.hpp"
38 #include "compiler/compilerDefinitions.inline.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "utilities/powerOfTwo.hpp"
42 #include "vmreg_aarch64.inline.hpp"
43
44 #ifdef ASSERT
45 #define __ gen()->lir(__FILE__, __LINE__)->
46 #else
47 #define __ gen()->lir()->
48 #endif
49
50 // Item will be loaded into a byte register; Intel only
// On AArch64 every general-purpose register is byte-addressable, so the x86
// byte-register constraint does not apply and an ordinary load suffices.
51 void LIRItem::load_byte_item() {
52 load_item();
53 }
54
55
307
308 //----------------------------------------------------------------------
309 // visitor functions
310 //----------------------------------------------------------------------
311
// Emit LIR for monitorenter: load the object, allocate non-oop temps for the
// monitor stack-slot address ("lock") and a scratch register, then delegate
// to monitor_enter(). The bytecode produces no value, hence set_no_result().
312 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
313 assert(x->is_pinned(),"");
314 LIRItem obj(x->obj(), this);
315 obj.load_item();
316 
317 set_no_result(x);
318 
319 // "lock" stores the address of the monitor stack slot, so this is not an oop
320 LIR_Opr lock = new_register(T_INT);
321 LIR_Opr scratch = new_register(T_INT);
322 
// Debug state used only for the implicit null check of the monitored object.
323 CodeEmitInfo* info_for_exception = nullptr;
324 if (x->needs_null_check()) {
325 info_for_exception = state_for(x);
326 }
327 
// If the operand might be a value (inline-type) object, synchronizing on it
// is illegal: emit a slow-path stub that raises the identity exception
// (Valhalla). Left null when the receiver is known to have identity.
328 CodeStub* throw_ie_stub =
329 x->maybe_inlinetype() ?
330 new SimpleExceptionStub(StubId::c1_throw_identity_exception_id, obj.result(), state_for(x)) :
331 nullptr;
332 
333 // this CodeEmitInfo must not have the xhandlers because here the
334 // object is already locked (xhandlers expect object to be unlocked)
335 CodeEmitInfo* info = state_for(x, x->state(), true);
336 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
337 x->monitor_no(), info_for_exception, info, throw_ie_stub);
338 }
339
340
// Emit LIR for monitorexit. The object operand is deliberately NOT loaded
// (dont_load_item): monitor_exit receives a fresh temp (obj_temp) instead of
// obj.result() — presumably it recovers the object from the monitor slot;
// confirm against the shared monitor_exit implementation.
341 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
342 assert(x->is_pinned(),"");
343 
344 LIRItem obj(x->obj(), this);
345 obj.dont_load_item();
346 
// All temps are non-oop scratch registers, like in do_MonitorEnter.
347 LIR_Opr lock = new_register(T_INT);
348 LIR_Opr obj_temp = new_register(T_INT);
349 LIR_Opr scratch = new_register(T_INT);
350 set_no_result(x);
351 monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
352 }
353
354 void LIRGenerator::do_NegateOp(NegateOp* x) {
355
356 LIRItem from(x->x(), this);
357 from.load_item();
1117 value.load_item();
1118 LIR_Opr input = value.result();
1119 LIR_Opr result = rlock(x);
1120
1121 // arguments of lir_convert
1122 LIR_Opr conv_input = input;
1123 LIR_Opr conv_result = result;
1124
1125 __ convert(x->op(), conv_input, conv_result);
1126
1127 assert(result->is_virtual(), "result must be virtual register");
1128 set_result(x, result);
1129 }
1130
// Emit LIR for 'new': allocate an instance via new_instance() using a fixed
// set of AArch64 registers (r10/r11/r4 as oop temps, r3 for the klass
// metadata) — presumably dictated by the runtime allocation stub's calling
// convention; confirm against the C1 runtime stubs for this platform.
1131 void LIRGenerator::do_NewInstance(NewInstance* x) {
1132 #ifndef PRODUCT
// Diagnostic-only trace when allocating an instance of a not-yet-loaded class.
1133 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1134 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1135 }
1136 #endif
// Use the state before the allocation when reexecution requires it.
1137 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1138 LIR_Opr reg = result_register_for(x->type());
// Extra flag vs. mainline: whether the resolved klass is an inline type
// (only determinable when the klass is resolved).
1139 new_instance(reg, x->klass(), x->is_unresolved(),
1140 !x->is_unresolved() && x->klass()->is_inlinetype(),
1141 FrameMap::r10_oop_opr,
1142 FrameMap::r11_oop_opr,
1143 FrameMap::r4_oop_opr,
1144 LIR_OprFact::illegalOpr,
1145 FrameMap::r3_metadata_opr, info);
// Move the fixed result register into a virtual register for the consumer.
1146 LIR_Opr result = rlock_result(x);
1147 __ move(reg, result);
1148 }
1149
1150 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1151 CodeEmitInfo* info = nullptr;
1152 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1153 info = state_for(x, x->state_before());
1154 info->set_force_reexecute();
1155 } else {
1156 info = state_for(x, x->state());
1157 }
1158
1159 LIRItem length(x->length(), this);
1160 length.load_item_force(FrameMap::r19_opr);
1161
1162 LIR_Opr reg = result_register_for(x->type());
1163 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1164 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1165 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1181 LIRItem length(x->length(), this);
1182 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1183 // and therefore provide the state before the parameters have been consumed
1184 CodeEmitInfo* patching_info = nullptr;
1185 if (!x->klass()->is_loaded() || PatchALot) {
1186 patching_info = state_for(x, x->state_before());
1187 }
1188
1189 CodeEmitInfo* info = state_for(x, x->state());
1190
1191 LIR_Opr reg = result_register_for(x->type());
1192 LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1193 LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1194 LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1195 LIR_Opr tmp4 = reg;
1196 LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1197
1198 length.load_item_force(FrameMap::r19_opr);
1199 LIR_Opr len = length.result();
1200
1201 ciKlass* obj = ciObjArrayKlass::make(x->klass());
1202
1203 // TODO 8265122 Implement a fast path for this
1204 bool is_flat = obj->is_loaded() && obj->is_flat_array_klass();
1205 bool is_null_free = obj->is_loaded() && obj->as_array_klass()->is_elem_null_free();
1206
1207 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, is_null_free);
1208 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1209 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1210 }
1211
1212 klass2reg_with_patching(klass_reg, obj, patching_info);
1213 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, is_null_free || is_flat);
1214
1215 LIR_Opr result = rlock_result(x);
1216 __ move(reg, result);
1217 }
1218
1219
1220 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1221 Values* dims = x->dims();
1222 int i = dims->length();
1223 LIRItemList* items = new LIRItemList(i, i, nullptr);
1224 while (i-- > 0) {
1225 LIRItem* size = new LIRItem(dims->at(i), this);
1226 items->at_put(i, size);
1227 }
1228
1229 // Evaluate state_for early since it may emit code.
1230 CodeEmitInfo* patching_info = nullptr;
1231 if (!x->klass()->is_loaded() || PatchALot) {
1232 patching_info = state_for(x, x->state_before());
1233
1288 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1289
1290 CodeStub* stub;
1291 if (x->is_incompatible_class_change_check()) {
1292 assert(patching_info == nullptr, "can't patch this");
1293 stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1294 } else if (x->is_invokespecial_receiver_check()) {
1295 assert(patching_info == nullptr, "can't patch this");
1296 stub = new DeoptimizeStub(info_for_exception,
1297 Deoptimization::Reason_class_check,
1298 Deoptimization::Action_none);
1299 } else {
1300 stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception);
1301 }
1302 LIR_Opr reg = rlock_result(x);
1303 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1304 tmp3 = new_register(objectType);
1305 __ checkcast(reg, obj.result(), x->klass(),
1306 new_register(objectType), new_register(objectType), tmp3,
1307 x->direct_compare(), info_for_exception, patching_info, stub,
1308 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1309 }
1310
// Emit LIR for 'instanceof': lock the (int) result register, set up patching
// info if the tested class may need runtime resolution, then emit the
// lir_instanceof op with three fresh oop temp registers.
1311 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1312 LIRItem obj(x->obj(), this);
1313 
1314 // result and test object may not be in same register
1315 LIR_Opr reg = rlock_result(x);
1316 CodeEmitInfo* patching_info = nullptr;
1317 if ((!x->klass()->is_loaded() || PatchALot)) {
1318 // must do this before locking the destination register as an oop register
1319 patching_info = state_for(x, x->state_before());
1320 }
1321 obj.load_item();
// tmp3 is always allocated here; the illegalOpr initialization is only a
// placeholder mirroring platforms where the third temp is optional.
1322 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1323 tmp3 = new_register(objectType);
1324 __ instanceof(reg, obj.result(), x->klass(),
1325 new_register(objectType), new_register(objectType), tmp3,
1326 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1327 }
1328
1369 } else {
1370 yin->load_item();
1371 }
1372 } else {
1373 yin->load_item();
1374 }
1375
1376 set_no_result(x);
1377
1378 LIR_Opr left = xin->result();
1379 LIR_Opr right = yin->result();
1380
1381 // add safepoint before generating condition code so it can be recomputed
1382 if (x->is_safepoint()) {
1383 // increment backedge counter if needed
1384 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1385 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1386 __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1387 }
1388
1389 if (x->substitutability_check()) {
1390 substitutability_check(x, *xin, *yin);
1391 } else {
1392 __ cmp(lir_cond(cond), left, right);
1393 }
1394
1395 // Generate branch profiling. Profiling code doesn't kill flags.
1396 profile_branch(x, cond);
1397 move_to_phi(x->state());
1398 if (x->x()->type()->is_float_kind()) {
1399 __ branch(lir_cond(cond), x->tsux(), x->usux());
1400 } else {
1401 __ branch(lir_cond(cond), x->tsux());
1402 }
1403 assert(x->default_sux() == x->fsux(), "wrong destination above");
1404 __ jump(x->default_sux());
1405 }
1406
// Return the current-thread register (rthread) wrapped as a pointer operand.
1407 LIR_Opr LIRGenerator::getThreadPointer() {
1408 return FrameMap::as_pointer_opr(rthread);
1409 }
1410
// Block-entry tracing is not implemented on this platform.
1411 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1412
1413 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1414 CodeEmitInfo* info) {
|