13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_FrameMap.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_LIRGenerator.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArray.hpp"
33 #include "ci/ciObjArrayKlass.hpp"
34 #include "ci/ciTypeArrayKlass.hpp"
35 #include "gc/shared/c1/barrierSetC1.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "utilities/powerOfTwo.hpp"
39 #include "vmreg_x86.inline.hpp"
40
41 #ifdef ASSERT
42 #define __ gen()->lir(__FILE__, __LINE__)->
43 #else
44 #define __ gen()->lir()->
45 #endif
46
47 // Item will be loaded into a byte register; Intel only
48 void LIRItem::load_byte_item() {
49 load_item();
50 LIR_Opr res = result();
51
52 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
93 case objectTag: opr = FrameMap::rax_oop_opr; break;
94 case longTag: opr = FrameMap::long0_opr; break;
95 case floatTag: opr = FrameMap::xmm0_float_opr; break;
96 case doubleTag: opr = FrameMap::xmm0_double_opr; break;
97 case addressTag:
98 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
99 }
100
101 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
102 return opr;
103 }
104
105
106 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
107 LIR_Opr reg = new_register(T_INT);
108 set_vreg_flag(reg, LIRGenerator::byte_reg);
109 return reg;
110 }
111
112
113 //--------- loading items into registers --------------------------------
114
115
116 // i486 instructions can inline constants
117 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
118 if (type == T_SHORT || type == T_CHAR) {
119 return false;
120 }
121 Constant* c = v->as_Constant();
122 if (c && c->state_before() == nullptr) {
123 // constants of any type can be stored directly, except for
124 // unloaded object constants.
125 return true;
126 }
127 return false;
128 }
129
130
131 bool LIRGenerator::can_inline_as_constant(Value v) const {
132 if (v->type()->tag() == longTag) return false;
288 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
289 LIR_Opr tmp1 = new_register(objectType);
290 LIR_Opr tmp2 = new_register(objectType);
291 LIR_Opr tmp3 = new_register(objectType);
292 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
293 }
294
295 //----------------------------------------------------------------------
296 // visitor functions
297 //----------------------------------------------------------------------
298
299 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
300 assert(x->is_pinned(),"");
301 LIRItem obj(x->obj(), this);
302 obj.load_item();
303
304 set_no_result(x);
305
306 // "lock" stores the address of the monitor stack slot, so this is not an oop
307 LIR_Opr lock = new_register(T_INT);
308
309 CodeEmitInfo* info_for_exception = nullptr;
310 if (x->needs_null_check()) {
311 info_for_exception = state_for(x);
312 }
313 // this CodeEmitInfo must not have the xhandlers because here the
314 // object is already locked (xhandlers expect object to be unlocked)
315 CodeEmitInfo* info = state_for(x, x->state(), true);
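    // Only LM_LIGHTWEIGHT needs an extra temp register for the lock fast path; other locking modes pass an illegal operand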
316 LIR_Opr tmp = LockingMode == LM_LIGHTWEIGHT ? new_register(T_ADDRESS) : LIR_OprFact::illegalOpr;
317 monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
318 x->monitor_no(), info_for_exception, info);
319 }
320
321
322 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
323 assert(x->is_pinned(),"");
324
325 LIRItem obj(x->obj(), this);
326 obj.dont_load_item();
327
328 LIR_Opr lock = new_register(T_INT);
329 LIR_Opr obj_temp = new_register(T_INT);
330 set_no_result(x);
331 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
332 }
333
334 // _ineg, _lneg, _fneg, _dneg
335 void LIRGenerator::do_NegateOp(NegateOp* x) {
336 LIRItem value(x->x(), this);
337 value.set_destroys_register();
338 value.load_item();
1271
1272 if (needs_stub) {
1273 stub = new ConversionStub(x->op(), conv_input, conv_result);
1274 }
1275
1276 __ convert(x->op(), conv_input, conv_result, stub);
1277
1278 if (result != conv_result) {
1279 __ move(conv_result, result);
1280 }
1281
1282 assert(result->is_virtual(), "result must be virtual register");
1283 set_result(x, result);
1284 #endif // _LP64
1285 }
1286
1287
1288 void LIRGenerator::do_NewInstance(NewInstance* x) {
1289 print_if_not_loaded(x);
1290
1291 CodeEmitInfo* info = state_for(x, x->state());
1292 LIR_Opr reg = result_register_for(x->type());
1293 new_instance(reg, x->klass(), x->is_unresolved(),
1294 FrameMap::rcx_oop_opr,
1295 FrameMap::rdi_oop_opr,
1296 FrameMap::rsi_oop_opr,
1297 LIR_OprFact::illegalOpr,
1298 FrameMap::rdx_metadata_opr, info);
1299 LIR_Opr result = rlock_result(x);
1300 __ move(reg, result);
1301 }
1302
1303
1304 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
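    // If the allocation must be re-executed on deoptimization (force_reexecute), record the state before the bytecode; otherwise the state after it suffices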
1305 CodeEmitInfo* info = nullptr;
1306 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1307 info = state_for(x, x->state_before());
1308 info->set_force_reexecute();
1309 } else {
1310 info = state_for(x, x->state());
1311 }
1312
1313 LIRItem length(x->length(), this);
1314 length.load_item_force(FrameMap::rbx_opr);
1315
1316 LIR_Opr reg = result_register_for(x->type());
1317 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1318 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1319 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1320 LIR_Opr tmp4 = reg;
1321 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1322 LIR_Opr len = length.result();
1323 BasicType elem_type = x->elt_type();
1336 LIRItem length(x->length(), this);
1337 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1338 // and therefore provide the state before the parameters have been consumed
1339 CodeEmitInfo* patching_info = nullptr;
1340 if (!x->klass()->is_loaded() || PatchALot) {
1341 patching_info = state_for(x, x->state_before());
1342 }
1343
1344 CodeEmitInfo* info = state_for(x, x->state());
1345
1346 const LIR_Opr reg = result_register_for(x->type());
1347 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1348 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1349 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1350 LIR_Opr tmp4 = reg;
1351 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1352
1353 length.load_item_force(FrameMap::rbx_opr);
1354 LIR_Opr len = length.result();
1355
1356 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1357 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1358 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1359 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1360 }
1361 klass2reg_with_patching(klass_reg, obj, patching_info);
1362 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1363
1364 LIR_Opr result = rlock_result(x);
1365 __ move(reg, result);
1366 }
1367
1368
1369 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1370 Values* dims = x->dims();
1371 int i = dims->length();
1372 LIRItemList* items = new LIRItemList(i, i, nullptr);
1373 while (i-- > 0) {
1374 LIRItem* size = new LIRItem(dims->at(i), this);
1375 items->at_put(i, size);
1376 }
1377
1378 // Evaluate state_for early since it may emit code.
1379 CodeEmitInfo* patching_info = nullptr;
1380 if (!x->klass()->is_loaded() || PatchALot) {
1381 patching_info = state_for(x, x->state_before());
1382
1421 // nothing to do for now
1422 }
1423
1424
1425 void LIRGenerator::do_CheckCast(CheckCast* x) {
1426 LIRItem obj(x->obj(), this);
1427
1428 CodeEmitInfo* patching_info = nullptr;
1429 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1430 // must do this before locking the destination register as an oop register,
1431 // and before the obj is loaded (the latter is for deoptimization)
1432 patching_info = state_for(x, x->state_before());
1433 }
1434 obj.load_item();
1435
1436 // info for exceptions
1437 CodeEmitInfo* info_for_exception =
1438 (x->needs_exception_state() ? state_for(x) :
1439 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1440
1441 CodeStub* stub;
1442 if (x->is_incompatible_class_change_check()) {
1443 assert(patching_info == nullptr, "can't patch this");
1444 stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1445 } else if (x->is_invokespecial_receiver_check()) {
1446 assert(patching_info == nullptr, "can't patch this");
1447 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1448 } else {
1449 stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1450 }
1451 LIR_Opr reg = rlock_result(x);
1452 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1453 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1454 tmp3 = new_register(objectType);
1455 }
1456 __ checkcast(reg, obj.result(), x->klass(),
1457 new_register(objectType), new_register(objectType), tmp3,
1458 x->direct_compare(), info_for_exception, patching_info, stub,
1459 x->profiled_method(), x->profiled_bci());
1460 }
1461
1462
1463 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1464 LIRItem obj(x->obj(), this);
1465
1466 // result and test object may not be in same register
1467 LIR_Opr reg = rlock_result(x);
1468 CodeEmitInfo* patching_info = nullptr;
1469 if ((!x->klass()->is_loaded() || PatchALot)) {
1470 // must do this before locking the destination register as an oop register
1471 patching_info = state_for(x, x->state_before());
1472 }
1473 obj.load_item();
1474 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1475 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1476 tmp3 = new_register(objectType);
1477 }
1478 __ instanceof(reg, obj.result(), x->klass(),
1479 new_register(objectType), new_register(objectType), tmp3,
1495
1496 LIRItem xitem(x->x(), this);
1497 LIRItem yitem(x->y(), this);
1498 LIRItem* xin = &xitem;
1499 LIRItem* yin = &yitem;
1500
1501 if (tag == longTag) {
1502 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1503 // mirror for other conditions
1504 if (cond == If::gtr || cond == If::leq) {
1505 cond = Instruction::mirror(cond);
1506 xin = &yitem;
1507 yin = &xitem;
1508 }
1509 xin->set_destroys_register();
1510 }
1511 xin->load_item();
1512 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1513 // inline long zero
1514 yin->dont_load_item();
1515 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1516 // longs, floats, and doubles cannot take a constant on the right-hand side
1517 yin->load_item();
1518 } else {
1519 yin->dont_load_item();
1520 }
1521
1522 LIR_Opr left = xin->result();
1523 LIR_Opr right = yin->result();
1524
1525 set_no_result(x);
1526
1527 // add safepoint before generating condition code so it can be recomputed
1528 if (x->is_safepoint()) {
1529 // increment backedge counter if needed
1530 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1531 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1532 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1533 }
1534
1535 __ cmp(lir_cond(cond), left, right);
1536 // Generate branch profiling. Profiling code doesn't kill flags.
1537 profile_branch(x, cond);
1538 move_to_phi(x->state());
1539 if (x->x()->type()->is_float_kind()) {
1540 __ branch(lir_cond(cond), x->tsux(), x->usux());
1541 } else {
1542 __ branch(lir_cond(cond), x->tsux());
1543 }
1544 assert(x->default_sux() == x->fsux(), "wrong destination above");
1545 __ jump(x->default_sux());
1546 }
1547
1548
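// On 64-bit x86 the current JavaThread is kept in r15, so the pointer is available directly; 32-bit must load it into a fresh register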
1549 LIR_Opr LIRGenerator::getThreadPointer() {
1550 #ifdef _LP64
1551 return FrameMap::as_pointer_opr(r15_thread);
1552 #else
1553 LIR_Opr result = new_register(T_INT);
1554 __ get_thread(result);
1555 return result;
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_FrameMap.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_LIRGenerator.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArray.hpp"
33 #include "ci/ciInlineKlass.hpp"
34 #include "ci/ciObjArrayKlass.hpp"
35 #include "ci/ciTypeArrayKlass.hpp"
36 #include "gc/shared/c1/barrierSetC1.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "utilities/powerOfTwo.hpp"
40 #include "vmreg_x86.inline.hpp"
41
42 #ifdef ASSERT
43 #define __ gen()->lir(__FILE__, __LINE__)->
44 #else
45 #define __ gen()->lir()->
46 #endif
47
48 // Item will be loaded into a byte register; Intel only
49 void LIRItem::load_byte_item() {
50 load_item();
51 LIR_Opr res = result();
52
53 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
94 case objectTag: opr = FrameMap::rax_oop_opr; break;
95 case longTag: opr = FrameMap::long0_opr; break;
96 case floatTag: opr = FrameMap::xmm0_float_opr; break;
97 case doubleTag: opr = FrameMap::xmm0_double_opr; break;
98 case addressTag:
99 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
100 }
101
102 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
103 return opr;
104 }
105
106
107 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
108 LIR_Opr reg = new_register(T_INT);
109 set_vreg_flag(reg, LIRGenerator::byte_reg);
110 return reg;
111 }
112
113
114 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
115 // We just need one 32-bit temp register for x86/x64, to check whether both
116 // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
117 // @temp = %r10d
118 // mov $0x405, %r10d
119 // and (%left), %r10d /* if need to check left */
120 // and (%right), %r10d /* if need to check right */
121 // cmp $0x405, %r10d
122 // jne L_oops_not_equal
123 tmp1 = new_register(T_INT);
124 tmp2 = LIR_OprFact::illegalOpr;
125 }
126
127 //--------- loading items into registers --------------------------------
128
129
130 // i486 instructions can inline constants
131 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
132 if (type == T_SHORT || type == T_CHAR) {
133 return false;
134 }
135 Constant* c = v->as_Constant();
136 if (c && c->state_before() == nullptr) {
137 // constants of any type can be stored directly, except for
138 // unloaded object constants.
139 return true;
140 }
141 return false;
142 }
143
144
145 bool LIRGenerator::can_inline_as_constant(Value v) const {
146 if (v->type()->tag() == longTag) return false;
302 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
303 LIR_Opr tmp1 = new_register(objectType);
304 LIR_Opr tmp2 = new_register(objectType);
305 LIR_Opr tmp3 = new_register(objectType);
306 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
307 }
308
309 //----------------------------------------------------------------------
310 // visitor functions
311 //----------------------------------------------------------------------
312
313 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
314 assert(x->is_pinned(),"");
315 LIRItem obj(x->obj(), this);
316 obj.load_item();
317
318 set_no_result(x);
319
320 // "lock" stores the address of the monitor stack slot, so this is not an oop
321 LIR_Opr lock = new_register(T_INT);
322 // Need a scratch register for lightweight locking and for inline types on x86
323 LIR_Opr scratch = LIR_OprFact::illegalOpr;
324 if ((LockingMode == LM_LIGHTWEIGHT) ||
325 (EnableValhalla && x->maybe_inlinetype())) {
326 scratch = new_register(T_ADDRESS);
327 }
328
329 CodeEmitInfo* info_for_exception = nullptr;
330 if (x->needs_null_check()) {
331 info_for_exception = state_for(x);
332 }
333
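    // Locking a value (inline) object must fail with IdentityException; prepare the stub for that path when the receiver may be an inline type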
334 CodeStub* throw_ie_stub = x->maybe_inlinetype() ?
335 new SimpleExceptionStub(C1StubId::throw_identity_exception_id,
336 obj.result(), state_for(x))
337 : nullptr;
338
339 // this CodeEmitInfo must not have the xhandlers because here the
340 // object is already locked (xhandlers expect object to be unlocked)
341 CodeEmitInfo* info = state_for(x, x->state(), true);
342 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
343 x->monitor_no(), info_for_exception, info, throw_ie_stub);
344 }
345
346
347 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
348 assert(x->is_pinned(),"");
349
350 LIRItem obj(x->obj(), this);
351 obj.dont_load_item();
352
353 LIR_Opr lock = new_register(T_INT);
354 LIR_Opr obj_temp = new_register(T_INT);
355 set_no_result(x);
356 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
357 }
358
359 // _ineg, _lneg, _fneg, _dneg
360 void LIRGenerator::do_NegateOp(NegateOp* x) {
361 LIRItem value(x->x(), this);
362 value.set_destroys_register();
363 value.load_item();
1296
1297 if (needs_stub) {
1298 stub = new ConversionStub(x->op(), conv_input, conv_result);
1299 }
1300
1301 __ convert(x->op(), conv_input, conv_result, stub);
1302
1303 if (result != conv_result) {
1304 __ move(conv_result, result);
1305 }
1306
1307 assert(result->is_virtual(), "result must be virtual register");
1308 set_result(x, result);
1309 #endif // _LP64
1310 }
1311
1312
1313 void LIRGenerator::do_NewInstance(NewInstance* x) {
1314 print_if_not_loaded(x);
1315
1316 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1317 LIR_Opr reg = result_register_for(x->type());
1318 new_instance(reg, x->klass(), x->is_unresolved(),
1319 !x->is_unresolved() && x->klass()->is_inlinetype(),
1320 FrameMap::rcx_oop_opr,
1321 FrameMap::rdi_oop_opr,
1322 FrameMap::rsi_oop_opr,
1323 LIR_OprFact::illegalOpr,
1324 FrameMap::rdx_metadata_opr, info);
1325 LIR_Opr result = rlock_result(x);
1326 __ move(reg, result);
1327 }
1328
1329 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1330 CodeEmitInfo* info = nullptr;
1331 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1332 info = state_for(x, x->state_before());
1333 info->set_force_reexecute();
1334 } else {
1335 info = state_for(x, x->state());
1336 }
1337
1338 LIRItem length(x->length(), this);
1339 length.load_item_force(FrameMap::rbx_opr);
1340
1341 LIR_Opr reg = result_register_for(x->type());
1342 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1343 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1344 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1345 LIR_Opr tmp4 = reg;
1346 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1347 LIR_Opr len = length.result();
1348 BasicType elem_type = x->elt_type();
1361 LIRItem length(x->length(), this);
1362 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1363 // and therefore provide the state before the parameters have been consumed
1364 CodeEmitInfo* patching_info = nullptr;
1365 if (!x->klass()->is_loaded() || PatchALot) {
1366 patching_info = state_for(x, x->state_before());
1367 }
1368
1369 CodeEmitInfo* info = state_for(x, x->state());
1370
1371 const LIR_Opr reg = result_register_for(x->type());
1372 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1373 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1374 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1375 LIR_Opr tmp4 = reg;
1376 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1377
1378 length.load_item_force(FrameMap::rbx_opr);
1379 LIR_Opr len = length.result();
1380
1381 ciKlass* obj = (ciKlass*) x->exact_type();
1382 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1383 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1384 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1385 }
1386 klass2reg_with_patching(klass_reg, obj, patching_info);
1387 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
1388
1389 LIR_Opr result = rlock_result(x);
1390 __ move(reg, result);
1391 }
1392
1393
1394 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1395 Values* dims = x->dims();
1396 int i = dims->length();
1397 LIRItemList* items = new LIRItemList(i, i, nullptr);
1398 while (i-- > 0) {
1399 LIRItem* size = new LIRItem(dims->at(i), this);
1400 items->at_put(i, size);
1401 }
1402
1403 // Evaluate state_for early since it may emit code.
1404 CodeEmitInfo* patching_info = nullptr;
1405 if (!x->klass()->is_loaded() || PatchALot) {
1406 patching_info = state_for(x, x->state_before());
1407
1446 // nothing to do for now
1447 }
1448
1449
1450 void LIRGenerator::do_CheckCast(CheckCast* x) {
1451 LIRItem obj(x->obj(), this);
1452
1453 CodeEmitInfo* patching_info = nullptr;
1454 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1455 // must do this before locking the destination register as an oop register,
1456 // and before the obj is loaded (the latter is for deoptimization)
1457 patching_info = state_for(x, x->state_before());
1458 }
1459 obj.load_item();
1460
1461 // info for exceptions
1462 CodeEmitInfo* info_for_exception =
1463 (x->needs_exception_state() ? state_for(x) :
1464 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1465
1466 if (x->is_null_free()) {
1467 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1468 }
1469
1470 CodeStub* stub;
1471 if (x->is_incompatible_class_change_check()) {
1472 assert(patching_info == nullptr, "can't patch this");
1473 stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1474 } else if (x->is_invokespecial_receiver_check()) {
1475 assert(patching_info == nullptr, "can't patch this");
1476 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1477 } else {
1478 stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1479 }
1480 LIR_Opr reg = rlock_result(x);
1481 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1482 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1483 tmp3 = new_register(objectType);
1484 }
1485 __ checkcast(reg, obj.result(), x->klass(),
1486 new_register(objectType), new_register(objectType), tmp3,
1487 x->direct_compare(), info_for_exception, patching_info, stub,
1488 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1489 }
1490
1491
1492 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1493 LIRItem obj(x->obj(), this);
1494
1495 // result and test object may not be in same register
1496 LIR_Opr reg = rlock_result(x);
1497 CodeEmitInfo* patching_info = nullptr;
1498 if ((!x->klass()->is_loaded() || PatchALot)) {
1499 // must do this before locking the destination register as an oop register
1500 patching_info = state_for(x, x->state_before());
1501 }
1502 obj.load_item();
1503 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1504 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1505 tmp3 = new_register(objectType);
1506 }
1507 __ instanceof(reg, obj.result(), x->klass(),
1508 new_register(objectType), new_register(objectType), tmp3,
1524
1525 LIRItem xitem(x->x(), this);
1526 LIRItem yitem(x->y(), this);
1527 LIRItem* xin = &xitem;
1528 LIRItem* yin = &yitem;
1529
1530 if (tag == longTag) {
1531 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1532 // mirror for other conditions
1533 if (cond == If::gtr || cond == If::leq) {
1534 cond = Instruction::mirror(cond);
1535 xin = &yitem;
1536 yin = &xitem;
1537 }
1538 xin->set_destroys_register();
1539 }
1540 xin->load_item();
1541 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1542 // inline long zero
1543 yin->dont_load_item();
1544 } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
1545 // longs, floats, and doubles cannot take a constant on the right-hand side; substitutability checks also need both operands loaded
1546 yin->load_item();
1547 } else {
1548 yin->dont_load_item();
1549 }
1550
1551 LIR_Opr left = xin->result();
1552 LIR_Opr right = yin->result();
1553
1554 set_no_result(x);
1555
1556 // add safepoint before generating condition code so it can be recomputed
1557 if (x->is_safepoint()) {
1558 // increment backedge counter if needed
1559 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1560 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1561 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1562 }
1563
1564 if (x->substitutability_check()) {
1565 substitutability_check(x, *xin, *yin);
1566 } else {
1567 __ cmp(lir_cond(cond), left, right);
1568 }
1569 // Generate branch profiling. Profiling code doesn't kill flags.
1570 profile_branch(x, cond);
1571 move_to_phi(x->state());
1572 if (x->x()->type()->is_float_kind()) {
1573 __ branch(lir_cond(cond), x->tsux(), x->usux());
1574 } else {
1575 __ branch(lir_cond(cond), x->tsux());
1576 }
1577 assert(x->default_sux() == x->fsux(), "wrong destination above");
1578 __ jump(x->default_sux());
1579 }
1580
1581
1582 LIR_Opr LIRGenerator::getThreadPointer() {
1583 #ifdef _LP64
1584 return FrameMap::as_pointer_opr(r15_thread);
1585 #else
1586 LIR_Opr result = new_register(T_INT);
1587 __ get_thread(result);
1588 return result;