 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

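// '__' appends to the LIR list of the current generator (gen()->lir()); in
// ASSERT builds it also records the C++ source position of the emitting code.
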
// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
#else
    case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
#endif // _LP64
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


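// Allocate a fresh virtual register and flag it as byte-addressable so the
// register allocator only assigns it a register with an 8-bit form.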
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
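// Short and char stores are excluded, and object constants whose class is not
// yet loaded (state_before != NULL) cannot be stored directly.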
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
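  // long constants are never inlined; most x86 instructions can only encode
  // an immediate of at most 32 bits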
  if (v->type()->tag() == longTag) return false;
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
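  // negation clobbers its input register, so mark the item as destroyed
  // (a copy is used if the value is still needed elsewhere)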
  value.set_destroys_register();
  value.load_item();
  }

  __ convert(x->op(), conv_input, conv_result, stub);

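  // the conversion may have been routed through fixed registers; if so, copy
  // the value back into the virtual result register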
  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#endif // _LP64
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
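  // rcx/rdi/rsi serve as temps and rdx holds the klass; these are the fixed
  // registers the allocation fast path and its slow-path stub work with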
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
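  // the allocation slow path reads the array length from a fixed register,
  // so force it into rbx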
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
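  // an extra temp is needed when the klass may still have to be patched or
  // when compressed class pointers have to be decoded during the check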
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
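  // float/double compares also need an explicit successor for the unordered
  // case (one of the inputs is NaN)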
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
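  // on x86_64 the current JavaThread is kept permanently in r15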
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
|
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
#else
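    // without SSE, 32-bit x86 returns float/double values on the x87 stack (fpu0)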
    case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
    case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
#endif // _LP64
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  // We just need one 32-bit temp register for x86/x64, to check whether both
  // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
  // @temp = %r10d
  // mov $0x405, %r10d
  // and (%left), %r10d   /* if need to check left */
  // and (%right), %r10d  /* if need to check right */
  // cmp $0x405, $r10d
  // jne L_oops_not_equal
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}

//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == NULL) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for inline types on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (EnableValhalla && x->maybe_inlinetype()) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

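  // inline type instances cannot be synchronized on; if the receiver may be
  // an inline type, attach a stub that throws IllegalMonitorStateException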
  CodeStub* throw_imse_stub = x->maybe_inlinetype() ?
      new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
                              LIR_OprFact::illegalOpr, state_for(x))
      : NULL;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_imse_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#endif // _LP64
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               /* allow_inline */ false,
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
  // Mapping to do_NewInstance (same code) but use state_before for reexecution.
  CodeEmitInfo* info = state_for(x, x->state_before());
  x->set_to_object_type();
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), false,
               /* allow_inline */ true,
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  ciKlass* obj = (ciKlass*) x->exact_type();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
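  // null-free arrays are allocated with T_PRIMITIVE_OBJECT so the allocation
  // path can tell them apart from ordinary object arrays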
  if (x->is_null_free()) {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_PRIMITIVE_OBJECT, klass_reg, slow_path);
  } else {
    __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  }

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

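  // a cast to a null-free type must reject null before the actual type check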
  if (x->is_null_free()) {
    __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
  }

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_null_free());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

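  // acmp on values that may be inline types compares by substitutability
  // rather than by reference identity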
  if (x->substitutability_check()) {
    substitutability_check(x, *xin, *yin);
  } else {
    __ cmp(lir_cond(cond), left, right);
  }
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
|