14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciObjArrayKlass.hpp"
35 #include "ci/ciTypeArrayKlass.hpp"
36 #include "gc/shared/c1/barrierSetC1.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "utilities/powerOfTwo.hpp"
40 #include "vmreg_x86.inline.hpp"
41
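// The "__" shorthand below appends LIR operations to the current generator's LIR
// list; in ASSERT builds it also records the C++ file and line that emitted each op.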
42 #ifdef ASSERT
43 #define __ gen()->lir(__FILE__, __LINE__)->
44 #else
45 #define __ gen()->lir()->
46 #endif
47
48 // Item will be loaded into a byte register; Intel only
49 void LIRItem::load_byte_item() {
50 load_item();
51 LIR_Opr res = result();
52
53 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
99 #else
100 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
101 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
102 #endif // _LP64
103 case addressTag:
104 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
105 }
106
107 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
108 return opr;
109 }
110
111
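// Allocate a virtual register flagged as byte_reg so the register allocator only
// assigns it a byte-addressable register (a real restriction on 32-bit x86).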
112 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
113 LIR_Opr reg = new_register(T_INT);
114 set_vreg_flag(reg, LIRGenerator::byte_reg);
115 return reg;
116 }
117
118
119 //--------- loading items into registers --------------------------------
120
121
122 // i486 instructions can inline constants
123 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
124 if (type == T_SHORT || type == T_CHAR) {
125 return false;
126 }
127 Constant* c = v->as_Constant();
128 if (c && c->state_before() == nullptr) {
129 // constants of any type can be stored directly, except for
130 // unloaded object constants.
131 return true;
132 }
133 return false;
134 }
135
136
137 bool LIRGenerator::can_inline_as_constant(Value v) const {
138 if (v->type()->tag() == longTag) return false;
294 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
295 LIR_Opr tmp1 = new_register(objectType);
296 LIR_Opr tmp2 = new_register(objectType);
297 LIR_Opr tmp3 = new_register(objectType);
298 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
299 }
300
301 //----------------------------------------------------------------------
302 // visitor functions
303 //----------------------------------------------------------------------
304
305 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
306 assert(x->is_pinned(),"");
307 LIRItem obj(x->obj(), this);
308 obj.load_item();
309
310 set_no_result(x);
311
312 // "lock" stores the address of the monitor stack slot, so this is not an oop
313 LIR_Opr lock = new_register(T_INT);
314
315 CodeEmitInfo* info_for_exception = nullptr;
316 if (x->needs_null_check()) {
317 info_for_exception = state_for(x);
318 }
319 // this CodeEmitInfo must not have the xhandlers because here the
320 // object is already locked (xhandlers expect object to be unlocked)
321 CodeEmitInfo* info = state_for(x, x->state(), true);
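  // LM_LIGHTWEIGHT needs an extra temporary register for its locking fast path;
  // the other locking modes pass an illegal (unused) operand instead.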
322 LIR_Opr tmp = LockingMode == LM_LIGHTWEIGHT ? new_register(T_ADDRESS) : LIR_OprFact::illegalOpr;
323 monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
324 x->monitor_no(), info_for_exception, info);
325 }
326
327
328 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
329 assert(x->is_pinned(),"");
330
331 LIRItem obj(x->obj(), this);
332 obj.dont_load_item();
333
334 LIR_Opr lock = new_register(T_INT);
335 LIR_Opr obj_temp = new_register(T_INT);
336 set_no_result(x);
337 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
338 }
339
340 // _ineg, _lneg, _fneg, _dneg
341 void LIRGenerator::do_NegateOp(NegateOp* x) {
342 LIRItem value(x->x(), this);
343 value.set_destroys_register();
344 value.load_item();
1293
1294 if (needs_stub) {
1295 stub = new ConversionStub(x->op(), conv_input, conv_result);
1296 }
1297
1298 __ convert(x->op(), conv_input, conv_result, stub);
1299
1300 if (result != conv_result) {
1301 __ move(conv_result, result);
1302 }
1303
1304 assert(result->is_virtual(), "result must be virtual register");
1305 set_result(x, result);
1306 #endif // _LP64
1307 }
1308
1309
1310 void LIRGenerator::do_NewInstance(NewInstance* x) {
1311 print_if_not_loaded(x);
1312
1313 CodeEmitInfo* info = state_for(x, x->state());
1314 LIR_Opr reg = result_register_for(x->type());
1315 new_instance(reg, x->klass(), x->is_unresolved(),
1316 FrameMap::rcx_oop_opr,
1317 FrameMap::rdi_oop_opr,
1318 FrameMap::rsi_oop_opr,
1319 LIR_OprFact::illegalOpr,
1320 FrameMap::rdx_metadata_opr, info);
1321 LIR_Opr result = rlock_result(x);
1322 __ move(reg, result);
1323 }
1324
1325
1326 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1327 CodeEmitInfo* info = nullptr;
1328 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1329 info = state_for(x, x->state_before());
1330 info->set_force_reexecute();
1331 } else {
1332 info = state_for(x, x->state());
1333 }
1334
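  // Fixed registers below follow the x86 C1 allocation stub convention:
  // the array length is expected in rbx and the klass in rdx.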
1335 LIRItem length(x->length(), this);
1336 length.load_item_force(FrameMap::rbx_opr);
1337
1338 LIR_Opr reg = result_register_for(x->type());
1339 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1340 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1341 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1342 LIR_Opr tmp4 = reg;
1343 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1344 LIR_Opr len = length.result();
1345 BasicType elem_type = x->elt_type();
1358 LIRItem length(x->length(), this);
1359 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1360 // and therefore provide the state before the parameters have been consumed
1361 CodeEmitInfo* patching_info = nullptr;
1362 if (!x->klass()->is_loaded() || PatchALot) {
1363 patching_info = state_for(x, x->state_before());
1364 }
1365
1366 CodeEmitInfo* info = state_for(x, x->state());
1367
1368 const LIR_Opr reg = result_register_for(x->type());
1369 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1370 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1371 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1372 LIR_Opr tmp4 = reg;
1373 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1374
1375 length.load_item_force(FrameMap::rbx_opr);
1376 LIR_Opr len = length.result();
1377
1378 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1379 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1380 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1381 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1382 }
1383 klass2reg_with_patching(klass_reg, obj, patching_info);
1384 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1385
1386 LIR_Opr result = rlock_result(x);
1387 __ move(reg, result);
1388 }
1389
1390
1391 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1392 Values* dims = x->dims();
1393 int i = dims->length();
1394 LIRItemList* items = new LIRItemList(i, i, nullptr);
1395 while (i-- > 0) {
1396 LIRItem* size = new LIRItem(dims->at(i), this);
1397 items->at_put(i, size);
1398 }
1399
1400 // Evaluate state_for early since it may emit code.
1401 CodeEmitInfo* patching_info = nullptr;
1402 if (!x->klass()->is_loaded() || PatchALot) {
1403 patching_info = state_for(x, x->state_before());
1404
1443 // nothing to do for now
1444 }
1445
1446
1447 void LIRGenerator::do_CheckCast(CheckCast* x) {
1448 LIRItem obj(x->obj(), this);
1449
1450 CodeEmitInfo* patching_info = nullptr;
1451 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1452 // must do this before locking the destination register as an oop register,
1453 // and before the obj is loaded (the latter is for deoptimization)
1454 patching_info = state_for(x, x->state_before());
1455 }
1456 obj.load_item();
1457
1458 // info for exceptions
1459 CodeEmitInfo* info_for_exception =
1460 (x->needs_exception_state() ? state_for(x) :
1461 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1462
1463 CodeStub* stub;
1464 if (x->is_incompatible_class_change_check()) {
1465 assert(patching_info == nullptr, "can't patch this");
1466 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1467 } else if (x->is_invokespecial_receiver_check()) {
1468 assert(patching_info == nullptr, "can't patch this");
1469 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1470 } else {
1471 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1472 }
1473 LIR_Opr reg = rlock_result(x);
1474 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1475 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1476 tmp3 = new_register(objectType);
1477 }
1478 __ checkcast(reg, obj.result(), x->klass(),
1479 new_register(objectType), new_register(objectType), tmp3,
1480 x->direct_compare(), info_for_exception, patching_info, stub,
1481 x->profiled_method(), x->profiled_bci());
1482 }
1483
1484
1485 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1486 LIRItem obj(x->obj(), this);
1487
1488 // result and test object may not be in same register
1489 LIR_Opr reg = rlock_result(x);
1490 CodeEmitInfo* patching_info = nullptr;
1491 if ((!x->klass()->is_loaded() || PatchALot)) {
1492 // must do this before locking the destination register as an oop register
1493 patching_info = state_for(x, x->state_before());
1494 }
1495 obj.load_item();
1496 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1497 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1498 tmp3 = new_register(objectType);
1499 }
1500 __ instanceof(reg, obj.result(), x->klass(),
1501 new_register(objectType), new_register(objectType), tmp3,
1512
1513 LIRItem xitem(x->x(), this);
1514 LIRItem yitem(x->y(), this);
1515 LIRItem* xin = &xitem;
1516 LIRItem* yin = &yitem;
1517
1518 if (tag == longTag) {
1519 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1520 // mirror for other conditions
1521 if (cond == If::gtr || cond == If::leq) {
1522 cond = Instruction::mirror(cond);
1523 xin = &yitem;
1524 yin = &xitem;
1525 }
1526 xin->set_destroys_register();
1527 }
1528 xin->load_item();
1529 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1530 // inline long zero
1531 yin->dont_load_item();
1532 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1534     // longs cannot handle constants on the right side
1534 yin->load_item();
1535 } else {
1536 yin->dont_load_item();
1537 }
1538
1539 LIR_Opr left = xin->result();
1540 LIR_Opr right = yin->result();
1541
1542 set_no_result(x);
1543
1544 // add safepoint before generating condition code so it can be recomputed
1545 if (x->is_safepoint()) {
1546 // increment backedge counter if needed
1547 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1548 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1549 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1550 }
1551
1552 __ cmp(lir_cond(cond), left, right);
1553 // Generate branch profiling. Profiling code doesn't kill flags.
1554 profile_branch(x, cond);
1555 move_to_phi(x->state());
1556 if (x->x()->type()->is_float_kind()) {
1557 __ branch(lir_cond(cond), x->tsux(), x->usux());
1558 } else {
1559 __ branch(lir_cond(cond), x->tsux());
1560 }
1561 assert(x->default_sux() == x->fsux(), "wrong destination above");
1562 __ jump(x->default_sux());
1563 }
1564
1565
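// On x86_64 the current JavaThread is kept permanently in r15; 32-bit x86 has no
// dedicated thread register, so the thread pointer must be loaded explicitly.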
1566 LIR_Opr LIRGenerator::getThreadPointer() {
1567 #ifdef _LP64
1568 return FrameMap::as_pointer_opr(r15_thread);
1569 #else
1570 LIR_Opr result = new_register(T_INT);
1571 __ get_thread(result);
1572 return result;
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciInlineKlass.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "gc/shared/c1/barrierSetC1.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_x86.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
49 // Item will be loaded into a byte register; Intel only
50 void LIRItem::load_byte_item() {
51 load_item();
52 LIR_Opr res = result();
53
54 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
100 #else
101 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
102 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
103 #endif // _LP64
104 case addressTag:
105 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
106 }
107
108 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
109 return opr;
110 }
111
112
113 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
114 LIR_Opr reg = new_register(T_INT);
115 set_vreg_flag(reg, LIRGenerator::byte_reg);
116 return reg;
117 }
118
119
120 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
121 // We just need one 32-bit temp register for x86/x64, to check whether both
122 // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
123 // @temp = %r10d
124 // mov $0x405, %r10d
125 // and (%left), %r10d /* if need to check left */
126 // and (%right), %r10d /* if need to check right */
127   //     cmp $0x405, %r10d
128 // jne L_oops_not_equal
129 tmp1 = new_register(T_INT);
130 tmp2 = LIR_OprFact::illegalOpr;
131 }
132
133 //--------- loading items into registers --------------------------------
134
135
136 // i486 instructions can inline constants
137 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
138 if (type == T_SHORT || type == T_CHAR) {
139 return false;
140 }
141 Constant* c = v->as_Constant();
142 if (c && c->state_before() == nullptr) {
143 // constants of any type can be stored directly, except for
144 // unloaded object constants.
145 return true;
146 }
147 return false;
148 }
149
150
151 bool LIRGenerator::can_inline_as_constant(Value v) const {
152 if (v->type()->tag() == longTag) return false;
308 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
309 LIR_Opr tmp1 = new_register(objectType);
310 LIR_Opr tmp2 = new_register(objectType);
311 LIR_Opr tmp3 = new_register(objectType);
312 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
313 }
314
315 //----------------------------------------------------------------------
316 // visitor functions
317 //----------------------------------------------------------------------
318
319 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
320 assert(x->is_pinned(),"");
321 LIRItem obj(x->obj(), this);
322 obj.load_item();
323
324 set_no_result(x);
325
326 // "lock" stores the address of the monitor stack slot, so this is not an oop
327 LIR_Opr lock = new_register(T_INT);
328 // Need a scratch register for inline types on x86
329 LIR_Opr scratch = LIR_OprFact::illegalOpr;
330 if ((LockingMode == LM_LIGHTWEIGHT) ||
331 (EnableValhalla && x->maybe_inlinetype())) {
332 scratch = new_register(T_ADDRESS);
333 }
334
335 CodeEmitInfo* info_for_exception = nullptr;
336 if (x->needs_null_check()) {
337 info_for_exception = state_for(x);
338 }
339
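  // Synchronizing on a value object is not allowed: if the receiver may be an
  // inline type, prepare a stub that throws IdentityException on the slow path.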
340 CodeStub* throw_ie_stub = x->maybe_inlinetype() ?
341 new SimpleExceptionStub(Runtime1::throw_identity_exception_id,
342 obj.result(), state_for(x))
343 : nullptr;
344
345 // this CodeEmitInfo must not have the xhandlers because here the
346 // object is already locked (xhandlers expect object to be unlocked)
347 CodeEmitInfo* info = state_for(x, x->state(), true);
348 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
349 x->monitor_no(), info_for_exception, info, throw_ie_stub);
350 }
351
352
353 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
354 assert(x->is_pinned(),"");
355
356 LIRItem obj(x->obj(), this);
357 obj.dont_load_item();
358
359 LIR_Opr lock = new_register(T_INT);
360 LIR_Opr obj_temp = new_register(T_INT);
361 set_no_result(x);
362 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
363 }
364
365 // _ineg, _lneg, _fneg, _dneg
366 void LIRGenerator::do_NegateOp(NegateOp* x) {
367 LIRItem value(x->x(), this);
368 value.set_destroys_register();
369 value.load_item();
1318
1319 if (needs_stub) {
1320 stub = new ConversionStub(x->op(), conv_input, conv_result);
1321 }
1322
1323 __ convert(x->op(), conv_input, conv_result, stub);
1324
1325 if (result != conv_result) {
1326 __ move(conv_result, result);
1327 }
1328
1329 assert(result->is_virtual(), "result must be virtual register");
1330 set_result(x, result);
1331 #endif // _LP64
1332 }
1333
1334
1335 void LIRGenerator::do_NewInstance(NewInstance* x) {
1336 print_if_not_loaded(x);
1337
1338 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1339 LIR_Opr reg = result_register_for(x->type());
1340 new_instance(reg, x->klass(), x->is_unresolved(),
1341 !x->is_unresolved() && x->klass()->is_inlinetype(),
1342 FrameMap::rcx_oop_opr,
1343 FrameMap::rdi_oop_opr,
1344 FrameMap::rsi_oop_opr,
1345 LIR_OprFact::illegalOpr,
1346 FrameMap::rdx_metadata_opr, info);
1347 LIR_Opr result = rlock_result(x);
1348 __ move(reg, result);
1349 }
1350
1351 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1352 CodeEmitInfo* info = nullptr;
1353 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1354 info = state_for(x, x->state_before());
1355 info->set_force_reexecute();
1356 } else {
1357 info = state_for(x, x->state());
1358 }
1359
1360 LIRItem length(x->length(), this);
1361 length.load_item_force(FrameMap::rbx_opr);
1362
1363 LIR_Opr reg = result_register_for(x->type());
1364 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1365 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1366 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1367 LIR_Opr tmp4 = reg;
1368 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1369 LIR_Opr len = length.result();
1370 BasicType elem_type = x->elt_type();
1383 LIRItem length(x->length(), this);
1384 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1385 // and therefore provide the state before the parameters have been consumed
1386 CodeEmitInfo* patching_info = nullptr;
1387 if (!x->klass()->is_loaded() || PatchALot) {
1388 patching_info = state_for(x, x->state_before());
1389 }
1390
1391 CodeEmitInfo* info = state_for(x, x->state());
1392
1393 const LIR_Opr reg = result_register_for(x->type());
1394 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1395 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1396 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1397 LIR_Opr tmp4 = reg;
1398 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1399
1400 length.load_item_force(FrameMap::rbx_opr);
1401 LIR_Opr len = length.result();
1402
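  // The exact array klass and the null-free property are forwarded to the
  // slow-path stub and to allocate_array.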
1403 ciKlass* obj = (ciKlass*) x->exact_type();
1404 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1405 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1406 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1407 }
1408 klass2reg_with_patching(klass_reg, obj, patching_info);
1409 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
1410
1411 LIR_Opr result = rlock_result(x);
1412 __ move(reg, result);
1413 }
1414
1415
1416 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1417 Values* dims = x->dims();
1418 int i = dims->length();
1419 LIRItemList* items = new LIRItemList(i, i, nullptr);
1420 while (i-- > 0) {
1421 LIRItem* size = new LIRItem(dims->at(i), this);
1422 items->at_put(i, size);
1423 }
1424
1425 // Evaluate state_for early since it may emit code.
1426 CodeEmitInfo* patching_info = nullptr;
1427 if (!x->klass()->is_loaded() || PatchALot) {
1428 patching_info = state_for(x, x->state_before());
1429
1468 // nothing to do for now
1469 }
1470
1471
1472 void LIRGenerator::do_CheckCast(CheckCast* x) {
1473 LIRItem obj(x->obj(), this);
1474
1475 CodeEmitInfo* patching_info = nullptr;
1476 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1477 // must do this before locking the destination register as an oop register,
1478 // and before the obj is loaded (the latter is for deoptimization)
1479 patching_info = state_for(x, x->state_before());
1480 }
1481 obj.load_item();
1482
1483 // info for exceptions
1484 CodeEmitInfo* info_for_exception =
1485 (x->needs_exception_state() ? state_for(x) :
1486 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1487
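  // A cast to a null-free type must reject null explicitly before the type check.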
1488 if (x->is_null_free()) {
1489 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1490 }
1491
1492 CodeStub* stub;
1493 if (x->is_incompatible_class_change_check()) {
1494 assert(patching_info == nullptr, "can't patch this");
1495 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1496 } else if (x->is_invokespecial_receiver_check()) {
1497 assert(patching_info == nullptr, "can't patch this");
1498 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1499 } else {
1500 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1501 }
1502 LIR_Opr reg = rlock_result(x);
1503 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1504 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1505 tmp3 = new_register(objectType);
1506 }
1507 __ checkcast(reg, obj.result(), x->klass(),
1508 new_register(objectType), new_register(objectType), tmp3,
1509 x->direct_compare(), info_for_exception, patching_info, stub,
1510 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1511 }
1512
1513
1514 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1515 LIRItem obj(x->obj(), this);
1516
1517 // result and test object may not be in same register
1518 LIR_Opr reg = rlock_result(x);
1519 CodeEmitInfo* patching_info = nullptr;
1520 if ((!x->klass()->is_loaded() || PatchALot)) {
1521 // must do this before locking the destination register as an oop register
1522 patching_info = state_for(x, x->state_before());
1523 }
1524 obj.load_item();
1525 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1526 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1527 tmp3 = new_register(objectType);
1528 }
1529 __ instanceof(reg, obj.result(), x->klass(),
1530 new_register(objectType), new_register(objectType), tmp3,
1541
1542 LIRItem xitem(x->x(), this);
1543 LIRItem yitem(x->y(), this);
1544 LIRItem* xin = &xitem;
1545 LIRItem* yin = &yitem;
1546
1547 if (tag == longTag) {
1548 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1549 // mirror for other conditions
1550 if (cond == If::gtr || cond == If::leq) {
1551 cond = Instruction::mirror(cond);
1552 xin = &yitem;
1553 yin = &xitem;
1554 }
1555 xin->set_destroys_register();
1556 }
1557 xin->load_item();
1558 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1559 // inline long zero
1560 yin->dont_load_item();
1561 } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
1563     // longs cannot handle constants on the right side
1563 yin->load_item();
1564 } else {
1565 yin->dont_load_item();
1566 }
1567
1568 LIR_Opr left = xin->result();
1569 LIR_Opr right = yin->result();
1570
1571 set_no_result(x);
1572
1573 // add safepoint before generating condition code so it can be recomputed
1574 if (x->is_safepoint()) {
1575 // increment backedge counter if needed
1576 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1577 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1578 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1579 }
1580
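  // "==" on operands that may be value objects cannot be a plain pointer compare;
  // a substitutability check decides acmp for inline types.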
1581 if (x->substitutability_check()) {
1582 substitutability_check(x, *xin, *yin);
1583 } else {
1584 __ cmp(lir_cond(cond), left, right);
1585 }
1586 // Generate branch profiling. Profiling code doesn't kill flags.
1587 profile_branch(x, cond);
1588 move_to_phi(x->state());
1589 if (x->x()->type()->is_float_kind()) {
1590 __ branch(lir_cond(cond), x->tsux(), x->usux());
1591 } else {
1592 __ branch(lir_cond(cond), x->tsux());
1593 }
1594 assert(x->default_sux() == x->fsux(), "wrong destination above");
1595 __ jump(x->default_sux());
1596 }
1597
1598
1599 LIR_Opr LIRGenerator::getThreadPointer() {
1600 #ifdef _LP64
1601 return FrameMap::as_pointer_opr(r15_thread);
1602 #else
1603 LIR_Opr result = new_register(T_INT);
1604 __ get_thread(result);
1605 return result;