14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciObjArrayKlass.hpp"
35 #include "ci/ciTypeArrayKlass.hpp"
36 #include "gc/shared/c1/barrierSetC1.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "utilities/powerOfTwo.hpp"
40 #include "vmreg_x86.inline.hpp"
41
42 #ifdef ASSERT
43 #define __ gen()->lir(__FILE__, __LINE__)->
44 #else
45 #define __ gen()->lir()->
46 #endif
47
48 // Item will be loaded into a byte register; Intel only
49 void LIRItem::load_byte_item() {
50 load_item();
51 LIR_Opr res = result();
52
53 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
99 #else
100 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
101 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
102 #endif // _LP64
103 case addressTag:
104 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
105 }
106
107 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
108 return opr;
109 }
110
111
112 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
113 LIR_Opr reg = new_register(T_INT);
114 set_vreg_flag(reg, LIRGenerator::byte_reg);
115 return reg;
116 }
117
118
119 //--------- loading items into registers --------------------------------
120
121
122 // i486 instructions can inline constants
123 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
124 if (type == T_SHORT || type == T_CHAR) {
125 return false;
126 }
127 Constant* c = v->as_Constant();
128 if (c && c->state_before() == nullptr) {
129 // constants of any type can be stored directly, except for
130 // unloaded object constants.
131 return true;
132 }
133 return false;
134 }
135
136
137 bool LIRGenerator::can_inline_as_constant(Value v) const {
138 if (v->type()->tag() == longTag) return false;
294 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
295 LIR_Opr tmp1 = new_register(objectType);
296 LIR_Opr tmp2 = new_register(objectType);
297 LIR_Opr tmp3 = new_register(objectType);
298 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
299 }
300
301 //----------------------------------------------------------------------
302 // visitor functions
303 //----------------------------------------------------------------------
304
// Generate LIR for a monitorenter. The object is null-checked (when needed)
// with its own CodeEmitInfo; the locking itself uses a state taken after the
// lock conceptually exists (see comment below).
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  // Lightweight locking needs one extra address-sized temp; other locking
  // modes pass an illegal operand.
  LIR_Opr tmp = LockingMode == LM_LIGHTWEIGHT ? new_register(T_ADDRESS) : LIR_OprFact::illegalOpr;
  monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
                x->monitor_no(), info_for_exception, info);
}
326
327
328 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
329 assert(x->is_pinned(),"");
330
331 LIRItem obj(x->obj(), this);
332 obj.dont_load_item();
333
334 LIR_Opr lock = new_register(T_INT);
335 LIR_Opr obj_temp = new_register(T_INT);
336 set_no_result(x);
337 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
338 }
339
340 // _ineg, _lneg, _fneg, _dneg
341 void LIRGenerator::do_NegateOp(NegateOp* x) {
342 LIRItem value(x->x(), this);
343 value.set_destroys_register();
344 value.load_item();
1277
1278 if (needs_stub) {
1279 stub = new ConversionStub(x->op(), conv_input, conv_result);
1280 }
1281
1282 __ convert(x->op(), conv_input, conv_result, stub);
1283
1284 if (result != conv_result) {
1285 __ move(conv_result, result);
1286 }
1287
1288 assert(result->is_virtual(), "result must be virtual register");
1289 set_result(x, result);
1290 #endif // _LP64
1291 }
1292
1293
// Generate LIR for a "new" of a plain instance. Fixed registers are passed
// through to new_instance — presumably the slow-path stub's calling
// convention; do not change them without checking Runtime1. TODO confirm.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
                       FrameMap::rcx_oop_opr,
                       FrameMap::rdi_oop_opr,
                       FrameMap::rsi_oop_opr,
                       LIR_OprFact::illegalOpr,
                       FrameMap::rdx_metadata_opr, info);
  // Move the fixed result register into a fresh virtual register so the
  // value survives subsequent calls/allocations.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1308
1309
// Generate LIR for a primitive-array allocation. The element klass is a
// compile-time constant, so it is loaded directly (no patching needed).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  // Length forced into rbx; the fixed temps below presumably match the
  // NewTypeArrayStub calling convention — TODO confirm against Runtime1.
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  // Fast-path allocation with a stub for the slow case.
  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1333
1334
// Generate LIR for an object-array allocation, patching the klass at
// runtime when it is not yet loaded.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  // Fixed temps/length register — presumably the NewObjectArrayStub calling
  // convention; TODO confirm against Runtime1.
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  // Bail out of the compile entirely if CI could not create the array klass.
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1367
1368
1369 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1370 Values* dims = x->dims();
1371 int i = dims->length();
1372 LIRItemList* items = new LIRItemList(i, i, nullptr);
1373 while (i-- > 0) {
1374 LIRItem* size = new LIRItem(dims->at(i), this);
1375 items->at_put(i, size);
1376 }
1377
1378 // Evaluate state_for early since it may emit code.
1379 CodeEmitInfo* patching_info = nullptr;
1380 if (!x->klass()->is_loaded() || PatchALot) {
1381 patching_info = state_for(x, x->state_before());
1382
1421 // nothing to do for now
1422 }
1423
1424
// Generate LIR for a checkcast. Selects the slow-path stub by the kind of
// check encoded in the CheckCast node (ICCE check, invokespecial receiver
// check, or an ordinary cast that throws ClassCastException).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    // Receiver-type mismatch deoptimizes rather than throwing.
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is only required when the klass is unloaded or compressed
  // class pointers must be decoded.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
1461
1462
1463 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1464 LIRItem obj(x->obj(), this);
1465
1466 // result and test object may not be in same register
1467 LIR_Opr reg = rlock_result(x);
1468 CodeEmitInfo* patching_info = nullptr;
1469 if ((!x->klass()->is_loaded() || PatchALot)) {
1470 // must do this before locking the destination register as an oop register
1471 patching_info = state_for(x, x->state_before());
1472 }
1473 obj.load_item();
1474 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1475 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1476 tmp3 = new_register(objectType);
1477 }
1478 __ instanceof(reg, obj.result(), x->klass(),
1479 new_register(objectType), new_register(objectType), tmp3,
1490
1491 LIRItem xitem(x->x(), this);
1492 LIRItem yitem(x->y(), this);
1493 LIRItem* xin = &xitem;
1494 LIRItem* yin = &yitem;
1495
1496 if (tag == longTag) {
1497 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1498 // mirror for other conditions
1499 if (cond == If::gtr || cond == If::leq) {
1500 cond = Instruction::mirror(cond);
1501 xin = &yitem;
1502 yin = &xitem;
1503 }
1504 xin->set_destroys_register();
1505 }
1506 xin->load_item();
1507 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1508 // inline long zero
1509 yin->dont_load_item();
1510 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1511 // longs cannot handle constants at right side
1512 yin->load_item();
1513 } else {
1514 yin->dont_load_item();
1515 }
1516
1517 LIR_Opr left = xin->result();
1518 LIR_Opr right = yin->result();
1519
1520 set_no_result(x);
1521
1522 // add safepoint before generating condition code so it can be recomputed
1523 if (x->is_safepoint()) {
1524 // increment backedge counter if needed
1525 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1526 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1527 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1528 }
1529
1530 __ cmp(lir_cond(cond), left, right);
1531 // Generate branch profiling. Profiling code doesn't kill flags.
1532 profile_branch(x, cond);
1533 move_to_phi(x->state());
1534 if (x->x()->type()->is_float_kind()) {
1535 __ branch(lir_cond(cond), x->tsux(), x->usux());
1536 } else {
1537 __ branch(lir_cond(cond), x->tsux());
1538 }
1539 assert(x->default_sux() == x->fsux(), "wrong destination above");
1540 __ jump(x->default_sux());
1541 }
1542
1543
1544 LIR_Opr LIRGenerator::getThreadPointer() {
1545 #ifdef _LP64
1546 return FrameMap::as_pointer_opr(r15_thread);
1547 #else
1548 LIR_Opr result = new_register(T_INT);
1549 __ get_thread(result);
1550 return result;
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciInlineKlass.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "gc/shared/c1/barrierSetC1.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_x86.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
49 // Item will be loaded into a byte register; Intel only
50 void LIRItem::load_byte_item() {
51 load_item();
52 LIR_Opr res = result();
53
54 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
100 #else
101 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
102 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
103 #endif // _LP64
104 case addressTag:
105 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
106 }
107
108 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
109 return opr;
110 }
111
112
// Reserve a virtual register for a byte-sized result; the byte_reg flag lets
// the allocator constrain it to byte-addressable registers where needed.
// "type" is unused on x86.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}
118
119
// Provide the temp registers the substitutability check needs on x86:
// a single 32-bit temp (tmp1); tmp2 is deliberately left illegal.
void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  // We just need one 32-bit temp register for x86/x64, to check whether both
  // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
  // @temp = %r10d
  // mov $0x405, %r10d
  // and (%left), %r10d /* if need to check left */
  // and (%right), %r10d /* if need to check right */
  // cmp $0x405, $r10d
  // jne L_oops_not_equal
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}
132
133 //--------- loading items into registers --------------------------------
134
135
136 // i486 instructions can inline constants
137 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
138 if (type == T_SHORT || type == T_CHAR) {
139 return false;
140 }
141 Constant* c = v->as_Constant();
142 if (c && c->state_before() == nullptr) {
143 // constants of any type can be stored directly, except for
144 // unloaded object constants.
145 return true;
146 }
147 return false;
148 }
149
150
151 bool LIRGenerator::can_inline_as_constant(Value v) const {
152 if (v->type()->tag() == longTag) return false;
308 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
309 LIR_Opr tmp1 = new_register(objectType);
310 LIR_Opr tmp2 = new_register(objectType);
311 LIR_Opr tmp3 = new_register(objectType);
312 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
313 }
314
315 //----------------------------------------------------------------------
316 // visitor functions
317 //----------------------------------------------------------------------
318
// Generate LIR for a monitorenter (Valhalla-aware). Operands that may turn
// out to be inline types get an IllegalMonitorStateException slow path.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for inline types on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if ((LockingMode == LM_LIGHTWEIGHT) ||
      (EnableValhalla && x->maybe_inlinetype())) {
    scratch = new_register(T_ADDRESS);
  }

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // Slow path that throws IMSE when the operand is an inline type at runtime
  // (presumably because inline types cannot be locked — confirm with the
  // emit_lock implementation).
  CodeStub* throw_imse_stub = x->maybe_inlinetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
                              LIR_OprFact::illegalOpr, state_for(x))
    : nullptr;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}
351
352
353 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
354 assert(x->is_pinned(),"");
355
356 LIRItem obj(x->obj(), this);
357 obj.dont_load_item();
358
359 LIR_Opr lock = new_register(T_INT);
360 LIR_Opr obj_temp = new_register(T_INT);
361 set_no_result(x);
362 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
363 }
364
365 // _ineg, _lneg, _fneg, _dneg
366 void LIRGenerator::do_NegateOp(NegateOp* x) {
367 LIRItem value(x->x(), this);
368 value.set_destroys_register();
369 value.load_item();
1302
1303 if (needs_stub) {
1304 stub = new ConversionStub(x->op(), conv_input, conv_result);
1305 }
1306
1307 __ convert(x->op(), conv_input, conv_result, stub);
1308
1309 if (result != conv_result) {
1310 __ move(conv_result, result);
1311 }
1312
1313 assert(result->is_virtual(), "result must be virtual register");
1314 set_result(x, result);
1315 #endif // _LP64
1316 }
1317
1318
// Generate LIR for a "new" of a plain instance (Valhalla-aware): the extra
// boolean tells new_instance whether the resolved klass is an inline type.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  // When the instruction must be re-executed (needs_state_before), record the
  // state before the allocation instead of the state after it.
  CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               // resolved inline-type klass?
               !x->is_unresolved() && x->klass()->is_inlinetype(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  // Move the fixed result register into a fresh virtual register.
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1334
// Generate LIR for a primitive-array allocation (Valhalla variant). The
// element klass is a compile-time constant, so it is loaded directly.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  // Length forced into rbx; the fixed temps presumably match the
  // NewTypeArrayStub calling convention — TODO confirm against Runtime1.
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  // Trailing "false": primitive arrays are never null-free inline-type
  // arrays — matches the extra flag do_NewObjectArray passes. TODO confirm
  // against allocate_array's signature.
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, false);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1358
1359
// Generate LIR for an object-array allocation (Valhalla variant): the array
// may be null-free, which is threaded through to the stub and allocate_array.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  // Fixed temps/length register — presumably the NewObjectArrayStub calling
  // convention; TODO confirm against Runtime1.
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  // exact_type() already accounts for null-free-ness of the array klass.
  ciKlass* obj = (ciKlass*) x->exact_type();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
  // Bail out of the compile entirely if CI could not create the array klass.
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, x->is_null_free());

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
1392
1393
1394 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1395 Values* dims = x->dims();
1396 int i = dims->length();
1397 LIRItemList* items = new LIRItemList(i, i, nullptr);
1398 while (i-- > 0) {
1399 LIRItem* size = new LIRItem(dims->at(i), this);
1400 items->at_put(i, size);
1401 }
1402
1403 // Evaluate state_for early since it may emit code.
1404 CodeEmitInfo* patching_info = nullptr;
1405 if (!x->klass()->is_loaded() || PatchALot) {
1406 patching_info = state_for(x, x->state_before());
1407
1446 // nothing to do for now
1447 }
1448
1449
// Generate LIR for a checkcast (Valhalla variant): null-free casts get an
// explicit up-front null check, and is_null_free is forwarded to checkcast.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_null_free()) {
    // Null may not pass a null-free cast; a cloned CodeEmitInfo is used so
    // the null check and the type check each own their debug info.
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    // Receiver-type mismatch deoptimizes rather than throwing.
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is only required when the klass is unloaded or compressed
  // class pointers must be decoded.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_null_free());
}
1490
1491
1492 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1493 LIRItem obj(x->obj(), this);
1494
1495 // result and test object may not be in same register
1496 LIR_Opr reg = rlock_result(x);
1497 CodeEmitInfo* patching_info = nullptr;
1498 if ((!x->klass()->is_loaded() || PatchALot)) {
1499 // must do this before locking the destination register as an oop register
1500 patching_info = state_for(x, x->state_before());
1501 }
1502 obj.load_item();
1503 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1504 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1505 tmp3 = new_register(objectType);
1506 }
1507 __ instanceof(reg, obj.result(), x->klass(),
1508 new_register(objectType), new_register(objectType), tmp3,
1519
1520 LIRItem xitem(x->x(), this);
1521 LIRItem yitem(x->y(), this);
1522 LIRItem* xin = &xitem;
1523 LIRItem* yin = &yitem;
1524
1525 if (tag == longTag) {
1526 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1527 // mirror for other conditions
1528 if (cond == If::gtr || cond == If::leq) {
1529 cond = Instruction::mirror(cond);
1530 xin = &yitem;
1531 yin = &xitem;
1532 }
1533 xin->set_destroys_register();
1534 }
1535 xin->load_item();
1536 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1537 // inline long zero
1538 yin->dont_load_item();
1539 } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
1540 // longs cannot handle constants at right side
1541 yin->load_item();
1542 } else {
1543 yin->dont_load_item();
1544 }
1545
1546 LIR_Opr left = xin->result();
1547 LIR_Opr right = yin->result();
1548
1549 set_no_result(x);
1550
1551 // add safepoint before generating condition code so it can be recomputed
1552 if (x->is_safepoint()) {
1553 // increment backedge counter if needed
1554 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1555 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1556 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1557 }
1558
1559 if (x->substitutability_check()) {
1560 substitutability_check(x, *xin, *yin);
1561 } else {
1562 __ cmp(lir_cond(cond), left, right);
1563 }
1564 // Generate branch profiling. Profiling code doesn't kill flags.
1565 profile_branch(x, cond);
1566 move_to_phi(x->state());
1567 if (x->x()->type()->is_float_kind()) {
1568 __ branch(lir_cond(cond), x->tsux(), x->usux());
1569 } else {
1570 __ branch(lir_cond(cond), x->tsux());
1571 }
1572 assert(x->default_sux() == x->fsux(), "wrong destination above");
1573 __ jump(x->default_sux());
1574 }
1575
1576
1577 LIR_Opr LIRGenerator::getThreadPointer() {
1578 #ifdef _LP64
1579 return FrameMap::as_pointer_opr(r15_thread);
1580 #else
1581 LIR_Opr result = new_register(T_INT);
1582 __ get_thread(result);
1583 return result;
|