src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"

  34 #include "ci/ciObjArrayKlass.hpp"
  35 #include "ci/ciTypeArrayKlass.hpp"
  36 #include "gc/shared/c1/barrierSetC1.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "utilities/powerOfTwo.hpp"
  40 #include "vmreg_x86.inline.hpp"
  41 
  42 #ifdef ASSERT
  43 #define __ gen()->lir(__FILE__, __LINE__)->
  44 #else
  45 #define __ gen()->lir()->
  46 #endif
  47 
  48 // Item will be loaded into a byte register; Intel only
  49 void LIRItem::load_byte_item() {
  50   load_item();
  51   LIR_Opr res = result();
  52 
  53   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {

  99 #else
 100     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
 101     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
 102 #endif // _LP64
 103     case addressTag:
 104     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 105   }
 106 
 107   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
 108   return opr;
 109 }
 110 
 111 
 112 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 113   LIR_Opr reg = new_register(T_INT);
 114   set_vreg_flag(reg, LIRGenerator::byte_reg);
 115   return reg;
 116 }
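        // Editorial note, not part of this file: rlock_byte and the byte_reg vreg
        // flag exist because on 32-bit x86 only eax/ebx/ecx/edx have byte-addressable
        // subregisters (al..dl), so byte loads and stores must be restricted to those
        // registers; on x86_64 every GPR has a byte form, but the shared code path
        // keeps the flag.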
 117 
 118 













 119 //--------- loading items into registers --------------------------------
 120 
 121 
 122 // i486 instructions can inline constants
 123 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 124   if (type == T_SHORT || type == T_CHAR) {
  125     // there is no immediate move of word values in assembler_i486.?pp
 126     return false;
 127   }
 128   Constant* c = v->as_Constant();
 129   if (c && c->state_before() == NULL) {
 130     // constants of any type can be stored directly, except for
 131     // unloaded object constants.
 132     return true;
 133   }
 134   return false;
 135 }
 136 
 137 
 138 bool LIRGenerator::can_inline_as_constant(Value v) const {

 295 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 296   LIR_Opr tmp1 = new_register(objectType);
 297   LIR_Opr tmp2 = new_register(objectType);
 298   LIR_Opr tmp3 = new_register(objectType);
 299   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 300 }
 301 
 302 //----------------------------------------------------------------------
 303 //             visitor functions
 304 //----------------------------------------------------------------------
 305 
 306 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 307   assert(x->is_pinned(),"");
 308   LIRItem obj(x->obj(), this);
 309   obj.load_item();
 310 
 311   set_no_result(x);
 312 
 313   // "lock" stores the address of the monitor stack slot, so this is not an oop
 314   LIR_Opr lock = new_register(T_INT);





 315 
 316   CodeEmitInfo* info_for_exception = NULL;
 317   if (x->needs_null_check()) {
 318     info_for_exception = state_for(x);
 319   }






 320   // this CodeEmitInfo must not have the xhandlers because here the
 321   // object is already locked (xhandlers expect object to be unlocked)
 322   CodeEmitInfo* info = state_for(x, x->state(), true);
 323   monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
 324                         x->monitor_no(), info_for_exception, info);
 325 }
 326 
 327 
 328 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 329   assert(x->is_pinned(),"");
 330 
 331   LIRItem obj(x->obj(), this);
 332   obj.dont_load_item();
 333 
 334   LIR_Opr lock = new_register(T_INT);
 335   LIR_Opr obj_temp = new_register(T_INT);
 336   set_no_result(x);
 337   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 338 }
 339 
 340 
 341 // _ineg, _lneg, _fneg, _dneg
 342 void LIRGenerator::do_NegateOp(NegateOp* x) {
 343   LIRItem value(x->x(), this);
 344   value.set_destroys_register();

1272   }
1273 
1274   __ convert(x->op(), conv_input, conv_result, stub);
1275 
1276   if (result != conv_result) {
1277     __ move(conv_result, result);
1278   }
1279 
1280   assert(result->is_virtual(), "result must be virtual register");
1281   set_result(x, result);
1282 #endif // _LP64
1283 }
1284 
1285 
1286 void LIRGenerator::do_NewInstance(NewInstance* x) {
1287   print_if_not_loaded(x);
1288 
1289   CodeEmitInfo* info = state_for(x, x->state());
1290   LIR_Opr reg = result_register_for(x->type());
1291   new_instance(reg, x->klass(), x->is_unresolved(),
1292                        FrameMap::rcx_oop_opr,
1293                        FrameMap::rdi_oop_opr,
1294                        FrameMap::rsi_oop_opr,
1295                        LIR_OprFact::illegalOpr,
1296                        FrameMap::rdx_metadata_opr, info);

1297   LIR_Opr result = rlock_result(x);
1298   __ move(reg, result);
1299 }
1300 















1301 
1302 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1303   CodeEmitInfo* info = state_for(x, x->state());
1304 
1305   LIRItem length(x->length(), this);
1306   length.load_item_force(FrameMap::rbx_opr);
1307 
1308   LIR_Opr reg = result_register_for(x->type());
1309   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1310   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1311   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1312   LIR_Opr tmp4 = reg;
1313   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1314   LIR_Opr len = length.result();
1315   BasicType elem_type = x->elt_type();
1316 
1317   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1318 
1319   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1320   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

1328   LIRItem length(x->length(), this);
1329   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1330   // and therefore provide the state before the parameters have been consumed
1331   CodeEmitInfo* patching_info = NULL;
1332   if (!x->klass()->is_loaded() || PatchALot) {
1333     patching_info =  state_for(x, x->state_before());
1334   }
1335 
1336   CodeEmitInfo* info = state_for(x, x->state());
1337 
1338   const LIR_Opr reg = result_register_for(x->type());
1339   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1340   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1341   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1342   LIR_Opr tmp4 = reg;
1343   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1344 
1345   length.load_item_force(FrameMap::rbx_opr);
1346   LIR_Opr len = length.result();
1347 
1348   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1349   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1350   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1351     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1352   }
1353   klass2reg_with_patching(klass_reg, obj, patching_info);
1354   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);




1355 
1356   LIR_Opr result = rlock_result(x);
1357   __ move(reg, result);
1358 }
1359 
1360 
1361 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1362   Values* dims = x->dims();
1363   int i = dims->length();
1364   LIRItemList* items = new LIRItemList(i, i, NULL);
1365   while (i-- > 0) {
1366     LIRItem* size = new LIRItem(dims->at(i), this);
1367     items->at_put(i, size);
1368   }
1369 
1370   // Evaluate state_for early since it may emit code.
1371   CodeEmitInfo* patching_info = NULL;
1372   if (!x->klass()->is_loaded() || PatchALot) {
1373     patching_info = state_for(x, x->state_before());
1374 

1413   // nothing to do for now
1414 }
1415 
1416 
1417 void LIRGenerator::do_CheckCast(CheckCast* x) {
1418   LIRItem obj(x->obj(), this);
1419 
1420   CodeEmitInfo* patching_info = NULL;
1421   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1422     // must do this before locking the destination register as an oop register,
1423     // and before the obj is loaded (the latter is for deoptimization)
1424     patching_info = state_for(x, x->state_before());
1425   }
1426   obj.load_item();
1427 
1428   // info for exceptions
1429   CodeEmitInfo* info_for_exception =
1430       (x->needs_exception_state() ? state_for(x) :
1431                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1432 




1433   CodeStub* stub;
1434   if (x->is_incompatible_class_change_check()) {
1435     assert(patching_info == NULL, "can't patch this");
1436     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1437   } else if (x->is_invokespecial_receiver_check()) {
1438     assert(patching_info == NULL, "can't patch this");
1439     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1440   } else {
1441     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1442   }
1443   LIR_Opr reg = rlock_result(x);
1444   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1445   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1446     tmp3 = new_register(objectType);
1447   }
1448   __ checkcast(reg, obj.result(), x->klass(),
1449                new_register(objectType), new_register(objectType), tmp3,
1450                x->direct_compare(), info_for_exception, patching_info, stub,
1451                x->profiled_method(), x->profiled_bci());
1452 }
1453 
1454 
1455 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1456   LIRItem obj(x->obj(), this);
1457 
1458   // result and test object may not be in same register
1459   LIR_Opr reg = rlock_result(x);
1460   CodeEmitInfo* patching_info = NULL;
1461   if ((!x->klass()->is_loaded() || PatchALot)) {
1462     // must do this before locking the destination register as an oop register
1463     patching_info = state_for(x, x->state_before());
1464   }
1465   obj.load_item();
1466   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1467   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1468     tmp3 = new_register(objectType);
1469   }
1470   __ instanceof(reg, obj.result(), x->klass(),
1471                 new_register(objectType), new_register(objectType), tmp3,

1482 
1483   LIRItem xitem(x->x(), this);
1484   LIRItem yitem(x->y(), this);
1485   LIRItem* xin = &xitem;
1486   LIRItem* yin = &yitem;
1487 
1488   if (tag == longTag) {
1489     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1490     // mirror for other conditions
1491     if (cond == If::gtr || cond == If::leq) {
1492       cond = Instruction::mirror(cond);
1493       xin = &yitem;
1494       yin = &xitem;
1495     }
1496     xin->set_destroys_register();
1497   }
1498   xin->load_item();
1499   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1500     // inline long zero
1501     yin->dont_load_item();
1502   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1503     // longs cannot handle constants at right side
1504     yin->load_item();
1505   } else {
1506     yin->dont_load_item();
1507   }
1508 
1509   LIR_Opr left = xin->result();
1510   LIR_Opr right = yin->result();
1511 
1512   set_no_result(x);
1513 
1514   // add safepoint before generating condition code so it can be recomputed
1515   if (x->is_safepoint()) {
1516     // increment backedge counter if needed
1517     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1518         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1519     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1520   }
1521 
1522   __ cmp(lir_cond(cond), left, right);




1523   // Generate branch profiling. Profiling code doesn't kill flags.
1524   profile_branch(x, cond);
1525   move_to_phi(x->state());
1526   if (x->x()->type()->is_float_kind()) {
1527     __ branch(lir_cond(cond), x->tsux(), x->usux());
1528   } else {
1529     __ branch(lir_cond(cond), x->tsux());
1530   }
1531   assert(x->default_sux() == x->fsux(), "wrong destination above");
1532   __ jump(x->default_sux());
1533 }
1534 
1535 
1536 LIR_Opr LIRGenerator::getThreadPointer() {
1537 #ifdef _LP64
1538   return FrameMap::as_pointer_opr(r15_thread);
1539 #else
1540   LIR_Opr result = new_register(T_INT);
1541   __ get_thread(result);
1542   return result;

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"
  34 #include "ci/ciInlineKlass.hpp"
  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "gc/shared/c1/barrierSetC1.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_x86.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52   LIR_Opr res = result();
  53 
  54   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {

 100 #else
 101     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
 102     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
 103 #endif // _LP64
 104     case addressTag:
 105     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 106   }
 107 
 108   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
 109   return opr;
 110 }
 111 
 112 
 113 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 114   LIR_Opr reg = new_register(T_INT);
 115   set_vreg_flag(reg, LIRGenerator::byte_reg);
 116   return reg;
 117 }
 118 
 119 
 120 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 121   // We just need one 32-bit temp register for x86/x64, to check whether both
 122   // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
 123   // @temp = %r10d
 124   // mov $0x405, %r10d
 125   // and (%left), %r10d   /* if need to check left */
 126   // and (%right), %r10d  /* if need to check right */
 127   // cmp $0x405, $r10d
 128   // jne L_oops_not_equal
 129   tmp1 = new_register(T_INT);
 130   tmp2 = LIR_OprFact::illegalOpr;
 131 }
 132 
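        // Editorial sketch, not part of this file: a C-level reconstruction
        // (hypothetical helper) of the check the single temp register performs, taken
        // from the instruction sequence in the comment above. If the AND result no
        // longer equals the pattern, at least one operand is not an inline type and
        // control goes to L_oops_not_equal.

        #include <cstdint>

        static bool both_marks_always_locked(uintptr_t left_mark, uintptr_t right_mark) {
          const uintptr_t pattern = 0x405;   // markWord::always_locked_pattern
          uintptr_t tmp = pattern;
          tmp &= left_mark;                  // and (%left), %r10d
          tmp &= right_mark;                 // and (%right), %r10d
          return tmp == pattern;             // cmp $0x405, %r10d ; jne L_oops_not_equal
        }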
 133 //--------- loading items into registers --------------------------------
 134 
 135 
 136 // i486 instructions can inline constants
 137 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 138   if (type == T_SHORT || type == T_CHAR) {
 139     // there is no immediate move of word values in assembler_i486.?pp
 140     return false;
 141   }
 142   Constant* c = v->as_Constant();
 143   if (c && c->state_before() == NULL) {
 144     // constants of any type can be stored directly, except for
 145     // unloaded object constants.
 146     return true;
 147   }
 148   return false;
 149 }
 150 
 151 
 152 bool LIRGenerator::can_inline_as_constant(Value v) const {

 309 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 310   LIR_Opr tmp1 = new_register(objectType);
 311   LIR_Opr tmp2 = new_register(objectType);
 312   LIR_Opr tmp3 = new_register(objectType);
 313   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 314 }
 315 
 316 //----------------------------------------------------------------------
 317 //             visitor functions
 318 //----------------------------------------------------------------------
 319 
 320 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 321   assert(x->is_pinned(),"");
 322   LIRItem obj(x->obj(), this);
 323   obj.load_item();
 324 
 325   set_no_result(x);
 326 
 327   // "lock" stores the address of the monitor stack slot, so this is not an oop
 328   LIR_Opr lock = new_register(T_INT);
 329   // Need a scratch register for inline types on x86
 330   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 331   if (EnableValhalla && x->maybe_inlinetype()) {
 332     scratch = new_register(T_INT);
 333   }
 334 
 335   CodeEmitInfo* info_for_exception = NULL;
 336   if (x->needs_null_check()) {
 337     info_for_exception = state_for(x);
 338   }
 339 
 340   CodeStub* throw_imse_stub = x->maybe_inlinetype() ?
 341       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
 342                               LIR_OprFact::illegalOpr, state_for(x))
 343     : NULL;
 344 
 345   // this CodeEmitInfo must not have the xhandlers because here the
 346   // object is already locked (xhandlers expect object to be unlocked)
 347   CodeEmitInfo* info = state_for(x, x->state(), true);
 348   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 349                 x->monitor_no(), info_for_exception, info, throw_imse_stub);
 350 }
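        // Editorial note, not part of this file: inline types have no identity and
        // cannot be locked, so when the receiver might be an inline type the emitted
        // monitorenter gets an extra scratch register and the throw_imse_stub above,
        // and throws IllegalMonitorStateException instead of locking when the mark
        // word carries the always-locked pattern.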
 351 
 352 
 353 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 354   assert(x->is_pinned(),"");
 355 
 356   LIRItem obj(x->obj(), this);
 357   obj.dont_load_item();
 358 
 359   LIR_Opr lock = new_register(T_INT);
 360   LIR_Opr obj_temp = new_register(T_INT);
 361   set_no_result(x);
 362   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 363 }
 364 
 365 
 366 // _ineg, _lneg, _fneg, _dneg
 367 void LIRGenerator::do_NegateOp(NegateOp* x) {
 368   LIRItem value(x->x(), this);
 369   value.set_destroys_register();

1297   }
1298 
1299   __ convert(x->op(), conv_input, conv_result, stub);
1300 
1301   if (result != conv_result) {
1302     __ move(conv_result, result);
1303   }
1304 
1305   assert(result->is_virtual(), "result must be virtual register");
1306   set_result(x, result);
1307 #endif // _LP64
1308 }
1309 
1310 
1311 void LIRGenerator::do_NewInstance(NewInstance* x) {
1312   print_if_not_loaded(x);
1313 
1314   CodeEmitInfo* info = state_for(x, x->state());
1315   LIR_Opr reg = result_register_for(x->type());
1316   new_instance(reg, x->klass(), x->is_unresolved(),
1317                /* allow_inline */ false,
1318                FrameMap::rcx_oop_opr,
1319                FrameMap::rdi_oop_opr,
1320                FrameMap::rsi_oop_opr,
1321                LIR_OprFact::illegalOpr,
1322                FrameMap::rdx_metadata_opr, info);
1323   LIR_Opr result = rlock_result(x);
1324   __ move(reg, result);
1325 }
1326 
1327 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1328   // Mapping to do_NewInstance (same code) but use state_before for reexecution.
1329   CodeEmitInfo* info = state_for(x, x->state_before());
1330   x->set_to_object_type();
1331   LIR_Opr reg = result_register_for(x->type());
1332   new_instance(reg, x->klass(), false,
1333                /* allow_inline */ true,
1334                FrameMap::rcx_oop_opr,
1335                FrameMap::rdi_oop_opr,
1336                FrameMap::rsi_oop_opr,
1337                LIR_OprFact::illegalOpr,
1338                FrameMap::rdx_metadata_opr, info);
1339   LIR_Opr result = rlock_result(x);
1340   __ move(reg, result);
1341 }
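        // Editorial note, not part of this file: the new allow_inline argument
        // separates the two allocation paths above. do_NewInstance passes false for
        // an ordinary identity class, while do_NewInlineTypeInstance passes true and,
        // per its comment, uses state_before so the instruction is reexecuted if
        // deoptimization occurs during allocation.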
1342 
1343 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1344   CodeEmitInfo* info = state_for(x, x->state());
1345 
1346   LIRItem length(x->length(), this);
1347   length.load_item_force(FrameMap::rbx_opr);
1348 
1349   LIR_Opr reg = result_register_for(x->type());
1350   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1351   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1352   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1353   LIR_Opr tmp4 = reg;
1354   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1355   LIR_Opr len = length.result();
1356   BasicType elem_type = x->elt_type();
1357 
1358   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1359 
1360   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1361   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

1369   LIRItem length(x->length(), this);
1370   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1371   // and therefore provide the state before the parameters have been consumed
1372   CodeEmitInfo* patching_info = NULL;
1373   if (!x->klass()->is_loaded() || PatchALot) {
1374     patching_info =  state_for(x, x->state_before());
1375   }
1376 
1377   CodeEmitInfo* info = state_for(x, x->state());
1378 
1379   const LIR_Opr reg = result_register_for(x->type());
1380   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1381   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1382   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1383   LIR_Opr tmp4 = reg;
1384   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1385 
1386   length.load_item_force(FrameMap::rbx_opr);
1387   LIR_Opr len = length.result();
1388 
1389   ciKlass* obj = (ciKlass*) x->exact_type();
1390   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1391   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1392     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1393   }
1394   klass2reg_with_patching(klass_reg, obj, patching_info);
1395   if (x->is_null_free()) {
1396     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_PRIMITIVE_OBJECT, klass_reg, slow_path);
1397   } else {
1398     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1399   }
1400 
1401   LIR_Opr result = rlock_result(x);
1402   __ move(reg, result);
1403 }
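        // Editorial note, not part of this file: is_null_free() selects between a
        // null-free array of inline objects (element type T_PRIMITIVE_OBJECT, slots
        // default-initialized rather than null, possibly flattened) and an ordinary
        // reference array (T_OBJECT); the stub receives the same flag so the slow
        // path allocates the matching layout.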
1404 
1405 
1406 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1407   Values* dims = x->dims();
1408   int i = dims->length();
1409   LIRItemList* items = new LIRItemList(i, i, NULL);
1410   while (i-- > 0) {
1411     LIRItem* size = new LIRItem(dims->at(i), this);
1412     items->at_put(i, size);
1413   }
1414 
1415   // Evaluate state_for early since it may emit code.
1416   CodeEmitInfo* patching_info = NULL;
1417   if (!x->klass()->is_loaded() || PatchALot) {
1418     patching_info = state_for(x, x->state_before());
1419 

1458   // nothing to do for now
1459 }
1460 
1461 
1462 void LIRGenerator::do_CheckCast(CheckCast* x) {
1463   LIRItem obj(x->obj(), this);
1464 
1465   CodeEmitInfo* patching_info = NULL;
1466   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1467     // must do this before locking the destination register as an oop register,
1468     // and before the obj is loaded (the latter is for deoptimization)
1469     patching_info = state_for(x, x->state_before());
1470   }
1471   obj.load_item();
1472 
1473   // info for exceptions
1474   CodeEmitInfo* info_for_exception =
1475       (x->needs_exception_state() ? state_for(x) :
1476                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1477 
1478   if (x->is_null_free()) {
1479     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1480   }
1481 
1482   CodeStub* stub;
1483   if (x->is_incompatible_class_change_check()) {
1484     assert(patching_info == NULL, "can't patch this");
1485     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1486   } else if (x->is_invokespecial_receiver_check()) {
1487     assert(patching_info == NULL, "can't patch this");
1488     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1489   } else {
1490     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1491   }
1492   LIR_Opr reg = rlock_result(x);
1493   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1494   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1495     tmp3 = new_register(objectType);
1496   }
1497   __ checkcast(reg, obj.result(), x->klass(),
1498                new_register(objectType), new_register(objectType), tmp3,
1499                x->direct_compare(), info_for_exception, patching_info, stub,
1500                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1501 }
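        // Editorial note, not part of this file: a cast to a null-free inline type
        // must reject null, which an ordinary checkcast would let pass, so a
        // null_check is emitted before the type check; the trailing is_null_free
        // argument tells the assembler which checkcast variant to emit.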
1502 
1503 
1504 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1505   LIRItem obj(x->obj(), this);
1506 
1507   // result and test object may not be in same register
1508   LIR_Opr reg = rlock_result(x);
1509   CodeEmitInfo* patching_info = NULL;
1510   if ((!x->klass()->is_loaded() || PatchALot)) {
1511     // must do this before locking the destination register as an oop register
1512     patching_info = state_for(x, x->state_before());
1513   }
1514   obj.load_item();
1515   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1516   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1517     tmp3 = new_register(objectType);
1518   }
1519   __ instanceof(reg, obj.result(), x->klass(),
1520                 new_register(objectType), new_register(objectType), tmp3,

1531 
1532   LIRItem xitem(x->x(), this);
1533   LIRItem yitem(x->y(), this);
1534   LIRItem* xin = &xitem;
1535   LIRItem* yin = &yitem;
1536 
1537   if (tag == longTag) {
1538     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1539     // mirror for other conditions
1540     if (cond == If::gtr || cond == If::leq) {
1541       cond = Instruction::mirror(cond);
1542       xin = &yitem;
1543       yin = &xitem;
1544     }
1545     xin->set_destroys_register();
1546   }
1547   xin->load_item();
1548   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1549     // inline long zero
1550     yin->dont_load_item();
1551   } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
1552     // longs cannot handle constants at right side
1553     yin->load_item();
1554   } else {
1555     yin->dont_load_item();
1556   }
1557 
1558   LIR_Opr left = xin->result();
1559   LIR_Opr right = yin->result();
1560 
1561   set_no_result(x);
1562 
1563   // add safepoint before generating condition code so it can be recomputed
1564   if (x->is_safepoint()) {
1565     // increment backedge counter if needed
1566     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1567         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1568     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1569   }
1570 
1571   if (x->substitutability_check()) {
1572     substitutability_check(x, *xin, *yin);
1573   } else {
1574     __ cmp(lir_cond(cond), left, right);
1575   }
1576   // Generate branch profiling. Profiling code doesn't kill flags.
1577   profile_branch(x, cond);
1578   move_to_phi(x->state());
1579   if (x->x()->type()->is_float_kind()) {
1580     __ branch(lir_cond(cond), x->tsux(), x->usux());
1581   } else {
1582     __ branch(lir_cond(cond), x->tsux());
1583   }
1584   assert(x->default_sux() == x->fsux(), "wrong destination above");
1585   __ jump(x->default_sux());
1586 }
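        // Editorial note, not part of this file: for acmp branches whose operands may
        // be inline types, reference comparison is replaced by a substitutability
        // check (two inline objects are == when their fields are equal), which is why
        // substitutability_check() requires the right-hand operand to be loaded and
        // sets the condition flags itself instead of a plain cmp.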
1587 
1588 
1589 LIR_Opr LIRGenerator::getThreadPointer() {
1590 #ifdef _LP64
1591   return FrameMap::as_pointer_opr(r15_thread);
1592 #else
1593   LIR_Opr result = new_register(T_INT);
1594   __ get_thread(result);
1595   return result;