14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciObjArrayKlass.hpp"
35 #include "ci/ciTypeArrayKlass.hpp"
36 #include "gc/shared/c1/barrierSetC1.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "utilities/powerOfTwo.hpp"
40 #include "vmreg_x86.inline.hpp"
41
42 #ifdef ASSERT
43 #define __ gen()->lir(__FILE__, __LINE__)->
44 #else
45 #define __ gen()->lir()->
46 #endif
47
48 // Item will be loaded into a byte register; Intel only
49 void LIRItem::load_byte_item() {
50 load_item();
51 LIR_Opr res = result();
52
53 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
99 #else
100 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
101 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
102 #endif // _LP64
103 case addressTag:
104 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
105 }
106
107 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
108 return opr;
109 }
110
111
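// Allocate a fresh virtual register and flag it as needing a byte-addressable
// register; on 32-bit x86 only rax, rbx, rcx and rdx have byte forms.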
112 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
113 LIR_Opr reg = new_register(T_INT);
114 set_vreg_flag(reg, LIRGenerator::byte_reg);
115 return reg;
116 }
117
118
119 //--------- loading items into registers --------------------------------
120
121
122 // i486 instructions can inline constants
123 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
124 if (type == T_SHORT || type == T_CHAR) {
125 return false;
126 }
127 Constant* c = v->as_Constant();
128 if (c && c->state_before() == nullptr) {
129 // constants of any type can be stored directly, except for
130 // unloaded object constants.
131 return true;
132 }
133 return false;
134 }
135
136
137 bool LIRGenerator::can_inline_as_constant(Value v) const {
138 if (v->type()->tag() == longTag) return false;
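// Dynamic type check for an object array store (aastore); three fresh object
// temps are handed to the LIR store_check.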
294 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
295 LIR_Opr tmp1 = new_register(objectType);
296 LIR_Opr tmp2 = new_register(objectType);
297 LIR_Opr tmp3 = new_register(objectType);
298 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
299 }
300
301 //----------------------------------------------------------------------
302 // visitor functions
303 //----------------------------------------------------------------------
304
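// monitorenter: the object must be in a register. Two CodeEmitInfos are built,
// one for the implicit null check of the object and one (without xhandlers) for
// the lock operation itself.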
305 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
306 assert(x->is_pinned(),"");
307 LIRItem obj(x->obj(), this);
308 obj.load_item();
309
310 set_no_result(x);
311
312 // "lock" stores the address of the monitor stack slot, so this is not an oop
313 LIR_Opr lock = new_register(T_INT);
314
315 CodeEmitInfo* info_for_exception = nullptr;
316 if (x->needs_null_check()) {
317 info_for_exception = state_for(x);
318 }
319 // this CodeEmitInfo must not have the xhandlers because here the
320 // object is already locked (xhandlers expect object to be unlocked)
321 CodeEmitInfo* info = state_for(x, x->state(), true);
322 LIR_Opr tmp = LockingMode == LM_LIGHTWEIGHT ? new_register(T_ADDRESS) : LIR_OprFact::illegalOpr;
323 monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
324 x->monitor_no(), info_for_exception, info);
325 }
326
327
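// monitorexit: the object does not need to be loaded here; the unlocking code
// reloads it from the monitor's stack slot and only needs fresh temp registers.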
328 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
329 assert(x->is_pinned(),"");
330
331 LIRItem obj(x->obj(), this);
332 obj.dont_load_item();
333
334 LIR_Opr lock = new_register(T_INT);
335 LIR_Opr obj_temp = new_register(T_INT);
336 set_no_result(x);
337 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
338 }
339
340 // _ineg, _lneg, _fneg, _dneg
341 void LIRGenerator::do_NegateOp(NegateOp* x) {
342 LIRItem value(x->x(), this);
343 value.set_destroys_register();
344 value.load_item();
1303
1304 if (needs_stub) {
1305 stub = new ConversionStub(x->op(), conv_input, conv_result);
1306 }
1307
1308 __ convert(x->op(), conv_input, conv_result, stub);
1309
1310 if (result != conv_result) {
1311 __ move(conv_result, result);
1312 }
1313
1314 assert(result->is_virtual(), "result must be virtual register");
1315 set_result(x, result);
1316 #endif // _LP64
1317 }
1318
1319
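// Allocate a new instance. The klass is materialized in rdx as the slow-path
// allocation stub expects; rcx/rdi/rsi serve as temps for the inline (TLAB)
// allocation fast path.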
1320 void LIRGenerator::do_NewInstance(NewInstance* x) {
1321 print_if_not_loaded(x);
1322
1323 CodeEmitInfo* info = state_for(x, x->state());
1324 LIR_Opr reg = result_register_for(x->type());
1325 new_instance(reg, x->klass(), x->is_unresolved(),
1326 FrameMap::rcx_oop_opr,
1327 FrameMap::rdi_oop_opr,
1328 FrameMap::rsi_oop_opr,
1329 LIR_OprFact::illegalOpr,
1330 FrameMap::rdx_metadata_opr, info);
1331 LIR_Opr result = rlock_result(x);
1332 __ move(reg, result);
1333 }
1334
1335
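// Allocate a primitive (type) array. The length is forced into rbx and the
// klass into rdx to match the slow-path stub's calling convention.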
1336 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1337 CodeEmitInfo* info = nullptr;
1338 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1339 info = state_for(x, x->state_before());
1340 info->set_force_reexecute();
1341 } else {
1342 info = state_for(x, x->state());
1343 }
1344
1345 LIRItem length(x->length(), this);
1346 length.load_item_force(FrameMap::rbx_opr);
1347
1348 LIR_Opr reg = result_register_for(x->type());
1349 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1350 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1351 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1352 LIR_Opr tmp4 = reg;
1353 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1354 LIR_Opr len = length.result();
1355 BasicType elem_type = x->elt_type();
1368 LIRItem length(x->length(), this);
1369 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1370 // and therefore provide the state before the parameters have been consumed
1371 CodeEmitInfo* patching_info = nullptr;
1372 if (!x->klass()->is_loaded() || PatchALot) {
1373 patching_info = state_for(x, x->state_before());
1374 }
1375
1376 CodeEmitInfo* info = state_for(x, x->state());
1377
1378 const LIR_Opr reg = result_register_for(x->type());
1379 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1380 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1381 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1382 LIR_Opr tmp4 = reg;
1383 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1384
1385 length.load_item_force(FrameMap::rbx_opr);
1386 LIR_Opr len = length.result();
1387
1388 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1389 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1390 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1391 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1392 }
1393 klass2reg_with_patching(klass_reg, obj, patching_info);
1394 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1395
1396 LIR_Opr result = rlock_result(x);
1397 __ move(reg, result);
1398 }
1399
1400
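// Allocate a multi-dimensional array: evaluate all dimension sizes up front,
// then hand them to the runtime.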
1401 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1402 Values* dims = x->dims();
1403 int i = dims->length();
1404 LIRItemList* items = new LIRItemList(i, i, nullptr);
1405 while (i-- > 0) {
1406 LIRItem* size = new LIRItem(dims->at(i), this);
1407 items->at_put(i, size);
1408 }
1409
1410 // Evaluate state_for early since it may emit code.
1411 CodeEmitInfo* patching_info = nullptr;
1412 if (!x->klass()->is_loaded() || PatchALot) {
1413 patching_info = state_for(x, x->state_before());
1414
1453 // nothing to do for now
1454 }
1455
1456
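// checkcast: choose the matching slow path (incompatible class change,
// deoptimization for invokespecial receiver checks, or ClassCastException) and
// emit the LIR checkcast with temps for the klass comparison.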
1457 void LIRGenerator::do_CheckCast(CheckCast* x) {
1458 LIRItem obj(x->obj(), this);
1459
1460 CodeEmitInfo* patching_info = nullptr;
1461 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1462 // must do this before locking the destination register as an oop register,
1463 // and before the obj is loaded (the latter is for deoptimization)
1464 patching_info = state_for(x, x->state_before());
1465 }
1466 obj.load_item();
1467
1468 // info for exceptions
1469 CodeEmitInfo* info_for_exception =
1470 (x->needs_exception_state() ? state_for(x) :
1471 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1472
1473 CodeStub* stub;
1474 if (x->is_incompatible_class_change_check()) {
1475 assert(patching_info == nullptr, "can't patch this");
1476 stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1477 } else if (x->is_invokespecial_receiver_check()) {
1478 assert(patching_info == nullptr, "can't patch this");
1479 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1480 } else {
1481 stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1482 }
1483 LIR_Opr reg = rlock_result(x);
1484 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1485 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1486 tmp3 = new_register(objectType);
1487 }
1488 __ checkcast(reg, obj.result(), x->klass(),
1489 new_register(objectType), new_register(objectType), tmp3,
1490 x->direct_compare(), info_for_exception, patching_info, stub,
1491 x->profiled_method(), x->profiled_bci());
1492 }
1493
1494
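// instanceof produces an int result and uses the same temps as checkcast for
// the klass comparison.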
1495 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1496 LIRItem obj(x->obj(), this);
1497
1498 // the result and the tested object must not end up in the same register
1499 LIR_Opr reg = rlock_result(x);
1500 CodeEmitInfo* patching_info = nullptr;
1501 if ((!x->klass()->is_loaded() || PatchALot)) {
1502 // must do this before locking the destination register as an oop register
1503 patching_info = state_for(x, x->state_before());
1504 }
1505 obj.load_item();
1506 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1507 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1508 tmp3 = new_register(objectType);
1509 }
1510 __ instanceof(reg, obj.result(), x->klass(),
1511 new_register(objectType), new_register(objectType), tmp3,
1522
1523 LIRItem xitem(x->x(), this);
1524 LIRItem yitem(x->y(), this);
1525 LIRItem* xin = &xitem;
1526 LIRItem* yin = &yitem;
1527
1528 if (tag == longTag) {
1529 // for longs, only the conditions "eql", "neq", "lss" and "geq" are valid;
1530 // mirror the condition and swap the operands for the others (e.g. x > y becomes y < x)
1531 if (cond == If::gtr || cond == If::leq) {
1532 cond = Instruction::mirror(cond);
1533 xin = &yitem;
1534 yin = &xitem;
1535 }
1536 xin->set_destroys_register();
1537 }
1538 xin->load_item();
1539 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1540 // inline long zero
1541 yin->dont_load_item();
1542 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1543 // longs, floats and doubles cannot handle constants on the right side
1544 yin->load_item();
1545 } else {
1546 yin->dont_load_item();
1547 }
1548
1549 LIR_Opr left = xin->result();
1550 LIR_Opr right = yin->result();
1551
1552 set_no_result(x);
1553
1554 // add safepoint before generating condition code so it can be recomputed
1555 if (x->is_safepoint()) {
1556 // increment backedge counter if needed
1557 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1558 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1559 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1560 }
1561
1562 __ cmp(lir_cond(cond), left, right);
1563 // Generate branch profiling. Profiling code doesn't kill flags.
1564 profile_branch(x, cond);
1565 move_to_phi(x->state());
1566 if (x->x()->type()->is_float_kind()) {
1567 __ branch(lir_cond(cond), x->tsux(), x->usux());
1568 } else {
1569 __ branch(lir_cond(cond), x->tsux());
1570 }
1571 assert(x->default_sux() == x->fsux(), "wrong destination above");
1572 __ jump(x->default_sux());
1573 }
1574
1575
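// On 64-bit the current thread is permanently cached in r15; on 32-bit it must
// be fetched explicitly.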
1576 LIR_Opr LIRGenerator::getThreadPointer() {
1577 #ifdef _LP64
1578 return FrameMap::as_pointer_opr(r15_thread);
1579 #else
1580 LIR_Opr result = new_register(T_INT);
1581 __ get_thread(result);
1582 return result;
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_Runtime1.hpp"
32 #include "c1/c1_ValueStack.hpp"
33 #include "ci/ciArray.hpp"
34 #include "ci/ciInlineKlass.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "gc/shared/c1/barrierSetC1.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/powerOfTwo.hpp"
41 #include "vmreg_x86.inline.hpp"
42
43 #ifdef ASSERT
44 #define __ gen()->lir(__FILE__, __LINE__)->
45 #else
46 #define __ gen()->lir()->
47 #endif
48
49 // Item will be loaded into a byte register; Intel only
50 void LIRItem::load_byte_item() {
51 load_item();
52 LIR_Opr res = result();
53
54 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
100 #else
101 case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
102 case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
103 #endif // _LP64
104 case addressTag:
105 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
106 }
107
108 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
109 return opr;
110 }
111
112
113 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
114 LIR_Opr reg = new_register(T_INT);
115 set_vreg_flag(reg, LIRGenerator::byte_reg);
116 return reg;
117 }
118
119
120 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
121 // We just need one 32-bit temp register for x86/x64, to check whether both
122 // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
123 // @temp = %r10d
124 // mov $0x405, %r10d
125 // and (%left), %r10d /* if need to check left */
126 // and (%right), %r10d /* if need to check right */
127 // cmp $0x405, %r10d
128 // jne L_oops_not_equal
129 tmp1 = new_register(T_INT);
130 tmp2 = LIR_OprFact::illegalOpr;
131 }
132
133 //--------- loading items into registers --------------------------------
134
135
136 // i486 instructions can inline constants
137 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
138 if (type == T_SHORT || type == T_CHAR) {
139 return false;
140 }
141 Constant* c = v->as_Constant();
142 if (c && c->state_before() == nullptr) {
143 // constants of any type can be stored directly, except for
144 // unloaded object constants.
145 return true;
146 }
147 return false;
148 }
149
150
151 bool LIRGenerator::can_inline_as_constant(Value v) const {
152 if (v->type()->tag() == longTag) return false;
308 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
309 LIR_Opr tmp1 = new_register(objectType);
310 LIR_Opr tmp2 = new_register(objectType);
311 LIR_Opr tmp3 = new_register(objectType);
312 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
313 }
314
315 //----------------------------------------------------------------------
316 // visitor functions
317 //----------------------------------------------------------------------
318
319 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
320 assert(x->is_pinned(),"");
321 LIRItem obj(x->obj(), this);
322 obj.load_item();
323
324 set_no_result(x);
325
326 // "lock" stores the address of the monitor stack slot, so this is not an oop
327 LIR_Opr lock = new_register(T_INT);
328 // Need a scratch register for lightweight locking and for inline types on x86
329 LIR_Opr scratch = LIR_OprFact::illegalOpr;
330 if ((LockingMode == LM_LIGHTWEIGHT) ||
331 (EnableValhalla && x->maybe_inlinetype())) {
332 scratch = new_register(T_ADDRESS);
333 }
334
335 CodeEmitInfo* info_for_exception = nullptr;
336 if (x->needs_null_check()) {
337 info_for_exception = state_for(x);
338 }
339
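  // Synchronizing on a value object is not allowed: if the object might be an
  // inline type, prepare a stub that throws IdentityException.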
340 CodeStub* throw_ie_stub = x->maybe_inlinetype() ?
341 new SimpleExceptionStub(C1StubId::throw_identity_exception_id,
342 obj.result(), state_for(x))
343 : nullptr;
344
345 // this CodeEmitInfo must not have the xhandlers because here the
346 // object is already locked (xhandlers expect object to be unlocked)
347 CodeEmitInfo* info = state_for(x, x->state(), true);
348 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
349 x->monitor_no(), info_for_exception, info, throw_ie_stub);
350 }
351
352
353 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
354 assert(x->is_pinned(),"");
355
356 LIRItem obj(x->obj(), this);
357 obj.dont_load_item();
358
359 LIR_Opr lock = new_register(T_INT);
360 LIR_Opr obj_temp = new_register(T_INT);
361 set_no_result(x);
362 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
363 }
364
365 // _ineg, _lneg, _fneg, _dneg
366 void LIRGenerator::do_NegateOp(NegateOp* x) {
367 LIRItem value(x->x(), this);
368 value.set_destroys_register();
369 value.load_item();
1328
1329 if (needs_stub) {
1330 stub = new ConversionStub(x->op(), conv_input, conv_result);
1331 }
1332
1333 __ convert(x->op(), conv_input, conv_result, stub);
1334
1335 if (result != conv_result) {
1336 __ move(conv_result, result);
1337 }
1338
1339 assert(result->is_virtual(), "result must be virtual register");
1340 set_result(x, result);
1341 #endif // _LP64
1342 }
1343
1344
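// In this version the debug info uses state_before() when the instruction
// needs it, and new_instance additionally receives whether the resolved klass
// is an inline type.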
1345 void LIRGenerator::do_NewInstance(NewInstance* x) {
1346 print_if_not_loaded(x);
1347
1348 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1349 LIR_Opr reg = result_register_for(x->type());
1350 new_instance(reg, x->klass(), x->is_unresolved(),
1351 !x->is_unresolved() && x->klass()->is_inlinetype(),
1352 FrameMap::rcx_oop_opr,
1353 FrameMap::rdi_oop_opr,
1354 FrameMap::rsi_oop_opr,
1355 LIR_OprFact::illegalOpr,
1356 FrameMap::rdx_metadata_opr, info);
1357 LIR_Opr result = rlock_result(x);
1358 __ move(reg, result);
1359 }
1360
1361 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1362 CodeEmitInfo* info = nullptr;
1363 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1364 info = state_for(x, x->state_before());
1365 info->set_force_reexecute();
1366 } else {
1367 info = state_for(x, x->state());
1368 }
1369
1370 LIRItem length(x->length(), this);
1371 length.load_item_force(FrameMap::rbx_opr);
1372
1373 LIR_Opr reg = result_register_for(x->type());
1374 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1375 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1376 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1377 LIR_Opr tmp4 = reg;
1378 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1379 LIR_Opr len = length.result();
1380 BasicType elem_type = x->elt_type();
1393 LIRItem length(x->length(), this);
1394 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1395 // and therefore provide the state before the parameters have been consumed
1396 CodeEmitInfo* patching_info = nullptr;
1397 if (!x->klass()->is_loaded() || PatchALot) {
1398 patching_info = state_for(x, x->state_before());
1399 }
1400
1401 CodeEmitInfo* info = state_for(x, x->state());
1402
1403 const LIR_Opr reg = result_register_for(x->type());
1404 LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1405 LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1406 LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1407 LIR_Opr tmp4 = reg;
1408 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1409
1410 length.load_item_force(FrameMap::rbx_opr);
1411 LIR_Opr len = length.result();
1412
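  // The exact array klass and x->is_null_free() are passed to the slow-path
  // stub and to allocate_array so that null-restricted arrays are handled.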
1413 ciKlass* obj = (ciKlass*) x->exact_type();
1414 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1415 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1416 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1417 }
1418 klass2reg_with_patching(klass_reg, obj, patching_info);
1419 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
1420
1421 LIR_Opr result = rlock_result(x);
1422 __ move(reg, result);
1423 }
1424
1425
1426 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1427 Values* dims = x->dims();
1428 int i = dims->length();
1429 LIRItemList* items = new LIRItemList(i, i, nullptr);
1430 while (i-- > 0) {
1431 LIRItem* size = new LIRItem(dims->at(i), this);
1432 items->at_put(i, size);
1433 }
1434
1435 // Evaluate state_for early since it may emit code.
1436 CodeEmitInfo* patching_info = nullptr;
1437 if (!x->klass()->is_loaded() || PatchALot) {
1438 patching_info = state_for(x, x->state_before());
1439
1478 // nothing to do for now
1479 }
1480
1481
1482 void LIRGenerator::do_CheckCast(CheckCast* x) {
1483 LIRItem obj(x->obj(), this);
1484
1485 CodeEmitInfo* patching_info = nullptr;
1486 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1487 // must do this before locking the destination register as an oop register,
1488 // and before the obj is loaded (the latter is for deoptimization)
1489 patching_info = state_for(x, x->state_before());
1490 }
1491 obj.load_item();
1492
1493 // info for exceptions
1494 CodeEmitInfo* info_for_exception =
1495 (x->needs_exception_state() ? state_for(x) :
1496 state_for(x, x->state_before(), true /*ignore_xhandler*/));
1497
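  // A null-free checkcast must reject null explicitly, so emit a null check
  // before the type check.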
1498 if (x->is_null_free()) {
1499 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1500 }
1501
1502 CodeStub* stub;
1503 if (x->is_incompatible_class_change_check()) {
1504 assert(patching_info == nullptr, "can't patch this");
1505 stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1506 } else if (x->is_invokespecial_receiver_check()) {
1507 assert(patching_info == nullptr, "can't patch this");
1508 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1509 } else {
1510 stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1511 }
1512 LIR_Opr reg = rlock_result(x);
1513 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1514 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1515 tmp3 = new_register(objectType);
1516 }
1517 __ checkcast(reg, obj.result(), x->klass(),
1518 new_register(objectType), new_register(objectType), tmp3,
1519 x->direct_compare(), info_for_exception, patching_info, stub,
1520 x->profiled_method(), x->profiled_bci(), x->is_null_free());
1521 }
1522
1523
1524 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1525 LIRItem obj(x->obj(), this);
1526
1527 // the result and the tested object must not end up in the same register
1528 LIR_Opr reg = rlock_result(x);
1529 CodeEmitInfo* patching_info = nullptr;
1530 if ((!x->klass()->is_loaded() || PatchALot)) {
1531 // must do this before locking the destination register as an oop register
1532 patching_info = state_for(x, x->state_before());
1533 }
1534 obj.load_item();
1535 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1536 if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1537 tmp3 = new_register(objectType);
1538 }
1539 __ instanceof(reg, obj.result(), x->klass(),
1540 new_register(objectType), new_register(objectType), tmp3,
1551
1552 LIRItem xitem(x->x(), this);
1553 LIRItem yitem(x->y(), this);
1554 LIRItem* xin = &xitem;
1555 LIRItem* yin = &yitem;
1556
1557 if (tag == longTag) {
1558 // for longs, only the conditions "eql", "neq", "lss" and "geq" are valid;
1559 // mirror the condition and swap the operands for the others (e.g. x > y becomes y < x)
1560 if (cond == If::gtr || cond == If::leq) {
1561 cond = Instruction::mirror(cond);
1562 xin = &yitem;
1563 yin = &xitem;
1564 }
1565 xin->set_destroys_register();
1566 }
1567 xin->load_item();
1568 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1569 // inline long zero
1570 yin->dont_load_item();
1571 } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
1572 // longs, floats and doubles cannot handle constants on the right side; the substitutability check also needs the right operand in a register
1573 yin->load_item();
1574 } else {
1575 yin->dont_load_item();
1576 }
1577
1578 LIR_Opr left = xin->result();
1579 LIR_Opr right = yin->result();
1580
1581 set_no_result(x);
1582
1583 // add safepoint before generating condition code so it can be recomputed
1584 if (x->is_safepoint()) {
1585 // increment backedge counter if needed
1586 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1587 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1588 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1589 }
1590
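  // An acmp on possible value objects uses the substitutability check instead
  // of a plain pointer compare; it has to leave the condition flags set the way
  // cmp would, since the branch code below is shared.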
1591 if (x->substitutability_check()) {
1592 substitutability_check(x, *xin, *yin);
1593 } else {
1594 __ cmp(lir_cond(cond), left, right);
1595 }
1596 // Generate branch profiling. Profiling code doesn't kill flags.
1597 profile_branch(x, cond);
1598 move_to_phi(x->state());
1599 if (x->x()->type()->is_float_kind()) {
1600 __ branch(lir_cond(cond), x->tsux(), x->usux());
1601 } else {
1602 __ branch(lir_cond(cond), x->tsux());
1603 }
1604 assert(x->default_sux() == x->fsux(), "wrong destination above");
1605 __ jump(x->default_sux());
1606 }
1607
1608
1609 LIR_Opr LIRGenerator::getThreadPointer() {
1610 #ifdef _LP64
1611 return FrameMap::as_pointer_opr(r15_thread);
1612 #else
1613 LIR_Opr result = new_register(T_INT);
1614 __ get_thread(result);
1615 return result;