
src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp (original version)


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"

  34 #include "ci/ciObjArrayKlass.hpp"
  35 #include "ci/ciTypeArrayKlass.hpp"
  36 #include "gc/shared/c1/barrierSetC1.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "utilities/powerOfTwo.hpp"
  40 #include "vmreg_x86.inline.hpp"
  41 
  42 #ifdef ASSERT
  43 #define __ gen()->lir(__FILE__, __LINE__)->
  44 #else
  45 #define __ gen()->lir()->
  46 #endif
  47 
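      // With this shorthand, LIR emission in the code below reads almost like
      // assembly; for example
      //   __ move(reg, result);
      // expands to
      //   gen()->lir()->move(reg, result);
      // (with __FILE__/__LINE__ additionally recorded in ASSERT builds).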
  48 // Item will be loaded into a byte register; Intel only
  49 void LIRItem::load_byte_item() {
  50   load_item();
  51   LIR_Opr res = result();
  52 
  53   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {

  99 #else
 100     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
 101     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
 102 #endif // _LP64
 103     case addressTag:
 104     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 105   }
 106 
 107   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
 108   return opr;
 109 }
 110 
 111 
 112 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 113   LIR_Opr reg = new_register(T_INT);
 114   set_vreg_flag(reg, LIRGenerator::byte_reg);
 115   return reg;
 116 }
 117 
  118 
 119 //--------- loading items into registers --------------------------------
 120 
 121 
 122 // i486 instructions can inline constants
 123 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 124   if (type == T_SHORT || type == T_CHAR) {
  125     // there is no immediate move of word values in assembler_i486.?pp
 126     return false;
 127   }
 128   Constant* c = v->as_Constant();
 129   if (c && c->state_before() == NULL) {
 130     // constants of any type can be stored directly, except for
 131     // unloaded object constants.
 132     return true;
 133   }
 134   return false;
 135 }
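      // Illustrative sketch of the constraint above: an int-sized constant store
      // can be emitted as a single memory-immediate move, e.g.
      //   movl $42, 12(%rdx)     // store the constant directly into the field
      // whereas the C1 assembler provides no 16-bit (word) immediate store, so
      // T_SHORT and T_CHAR constants must first be loaded into a register.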
 136 
 137 
 138 bool LIRGenerator::can_inline_as_constant(Value v) const {

 271 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 272   LIR_Opr tmp1 = new_register(objectType);
 273   LIR_Opr tmp2 = new_register(objectType);
 274   LIR_Opr tmp3 = new_register(objectType);
 275   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 276 }
 277 
 278 //----------------------------------------------------------------------
 279 //             visitor functions
 280 //----------------------------------------------------------------------
 281 
 282 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 283   assert(x->is_pinned(),"");
 284   LIRItem obj(x->obj(), this);
 285   obj.load_item();
 286 
 287   set_no_result(x);
 288 
 289   // "lock" stores the address of the monitor stack slot, so this is not an oop
  290   LIR_Opr lock = new_register(T_INT);
 291 
 292   CodeEmitInfo* info_for_exception = NULL;
 293   if (x->needs_null_check()) {
 294     info_for_exception = state_for(x);
  295   }
 296   // this CodeEmitInfo must not have the xhandlers because here the
 297   // object is already locked (xhandlers expect object to be unlocked)
 298   CodeEmitInfo* info = state_for(x, x->state(), true);
 299   monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
 300                         x->monitor_no(), info_for_exception, info);
 301 }
 302 
 303 
 304 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 305   assert(x->is_pinned(),"");
 306 
 307   LIRItem obj(x->obj(), this);
 308   obj.dont_load_item();
 309 
 310   LIR_Opr lock = new_register(T_INT);
 311   LIR_Opr obj_temp = new_register(T_INT);
 312   set_no_result(x);
 313   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 314 }
 315 
 316 
 317 // _ineg, _lneg, _fneg, _dneg
 318 void LIRGenerator::do_NegateOp(NegateOp* x) {
 319   LIRItem value(x->x(), this);
 320   value.set_destroys_register();

1242   }
1243 
1244   __ convert(x->op(), conv_input, conv_result, stub);
1245 
1246   if (result != conv_result) {
1247     __ move(conv_result, result);
1248   }
1249 
1250   assert(result->is_virtual(), "result must be virtual register");
1251   set_result(x, result);
1252 #endif // _LP64
1253 }
1254 
1255 
1256 void LIRGenerator::do_NewInstance(NewInstance* x) {
1257   print_if_not_loaded(x);
1258 
1259   CodeEmitInfo* info = state_for(x, x->state());
1260   LIR_Opr reg = result_register_for(x->type());
1261   new_instance(reg, x->klass(), x->is_unresolved(),
1262                        FrameMap::rcx_oop_opr,
1263                        FrameMap::rdi_oop_opr,
1264                        FrameMap::rsi_oop_opr,
1265                        LIR_OprFact::illegalOpr,
1266                        FrameMap::rdx_metadata_opr, info);

1267   LIR_Opr result = rlock_result(x);
1268   __ move(reg, result);
1269 }
 1270 
1271 
1272 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1273   CodeEmitInfo* info = state_for(x, x->state());
1274 
1275   LIRItem length(x->length(), this);
1276   length.load_item_force(FrameMap::rbx_opr);
1277 
1278   LIR_Opr reg = result_register_for(x->type());
1279   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1280   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1281   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1282   LIR_Opr tmp4 = reg;
1283   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1284   LIR_Opr len = length.result();
1285   BasicType elem_type = x->elt_type();
1286 
1287   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1288 
1289   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1290   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

1298   LIRItem length(x->length(), this);
1299   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1300   // and therefore provide the state before the parameters have been consumed
1301   CodeEmitInfo* patching_info = NULL;
1302   if (!x->klass()->is_loaded() || PatchALot) {
1303     patching_info =  state_for(x, x->state_before());
1304   }
1305 
1306   CodeEmitInfo* info = state_for(x, x->state());
1307 
1308   const LIR_Opr reg = result_register_for(x->type());
1309   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1310   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1311   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1312   LIR_Opr tmp4 = reg;
1313   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1314 
1315   length.load_item_force(FrameMap::rbx_opr);
1316   LIR_Opr len = length.result();
1317 
1318   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1319   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1320   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1321     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1322   }
1323   klass2reg_with_patching(klass_reg, obj, patching_info);
 1324   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1325 
1326   LIR_Opr result = rlock_result(x);
1327   __ move(reg, result);
1328 }
1329 
1330 
1331 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1332   Values* dims = x->dims();
1333   int i = dims->length();
1334   LIRItemList* items = new LIRItemList(i, i, NULL);
1335   while (i-- > 0) {
1336     LIRItem* size = new LIRItem(dims->at(i), this);
1337     items->at_put(i, size);
1338   }
1339 
1340   // Evaluate state_for early since it may emit code.
1341   CodeEmitInfo* patching_info = NULL;
1342   if (!x->klass()->is_loaded() || PatchALot) {
1343     patching_info = state_for(x, x->state_before());
1344 

1383   // nothing to do for now
1384 }
1385 
1386 
1387 void LIRGenerator::do_CheckCast(CheckCast* x) {
1388   LIRItem obj(x->obj(), this);
1389 
1390   CodeEmitInfo* patching_info = NULL;
1391   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1392     // must do this before locking the destination register as an oop register,
1393     // and before the obj is loaded (the latter is for deoptimization)
1394     patching_info = state_for(x, x->state_before());
1395   }
1396   obj.load_item();
1397 
1398   // info for exceptions
1399   CodeEmitInfo* info_for_exception =
1400       (x->needs_exception_state() ? state_for(x) :
1401                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
 1402 
1403   CodeStub* stub;
1404   if (x->is_incompatible_class_change_check()) {
1405     assert(patching_info == NULL, "can't patch this");
1406     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1407   } else if (x->is_invokespecial_receiver_check()) {
1408     assert(patching_info == NULL, "can't patch this");
1409     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1410   } else {
1411     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1412   }
1413   LIR_Opr reg = rlock_result(x);
1414   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1415   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1416     tmp3 = new_register(objectType);
1417   }
1418   __ checkcast(reg, obj.result(), x->klass(),
1419                new_register(objectType), new_register(objectType), tmp3,
1420                x->direct_compare(), info_for_exception, patching_info, stub,
1421                x->profiled_method(), x->profiled_bci());
1422 }
1423 
1424 
1425 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1426   LIRItem obj(x->obj(), this);
1427 
1428   // result and test object may not be in same register
1429   LIR_Opr reg = rlock_result(x);
1430   CodeEmitInfo* patching_info = NULL;
1431   if ((!x->klass()->is_loaded() || PatchALot)) {
1432     // must do this before locking the destination register as an oop register
1433     patching_info = state_for(x, x->state_before());
1434   }
1435   obj.load_item();
1436   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1437   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1438     tmp3 = new_register(objectType);
1439   }
1440   __ instanceof(reg, obj.result(), x->klass(),
1441                 new_register(objectType), new_register(objectType), tmp3,

1452 
1453   LIRItem xitem(x->x(), this);
1454   LIRItem yitem(x->y(), this);
1455   LIRItem* xin = &xitem;
1456   LIRItem* yin = &yitem;
1457 
1458   if (tag == longTag) {
1459     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1460     // mirror for other conditions
1461     if (cond == If::gtr || cond == If::leq) {
1462       cond = Instruction::mirror(cond);
1463       xin = &yitem;
1464       yin = &xitem;
1465     }
1466     xin->set_destroys_register();
1467   }
1468   xin->load_item();
1469   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1470     // inline long zero
1471     yin->dont_load_item();
1472   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
 1473     // longs cannot handle constants on the right-hand side
1474     yin->load_item();
1475   } else {
1476     yin->dont_load_item();
1477   }
1478 
1479   LIR_Opr left = xin->result();
1480   LIR_Opr right = yin->result();
1481 
1482   set_no_result(x);
1483 
1484   // add safepoint before generating condition code so it can be recomputed
1485   if (x->is_safepoint()) {
1486     // increment backedge counter if needed
1487     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1488         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1489     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1490   }
1491 
 1492   __ cmp(lir_cond(cond), left, right);
1493   // Generate branch profiling. Profiling code doesn't kill flags.
1494   profile_branch(x, cond);
1495   move_to_phi(x->state());
1496   if (x->x()->type()->is_float_kind()) {
1497     __ branch(lir_cond(cond), x->tsux(), x->usux());
1498   } else {
1499     __ branch(lir_cond(cond), x->tsux());
1500   }
1501   assert(x->default_sux() == x->fsux(), "wrong destination above");
1502   __ jump(x->default_sux());
1503 }
1504 
1505 
1506 LIR_Opr LIRGenerator::getThreadPointer() {
1507 #ifdef _LP64
1508   return FrameMap::as_pointer_opr(r15_thread);
1509 #else
1510   LIR_Opr result = new_register(T_INT);
1511   __ get_thread(result);
1512   return result;

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArray.hpp"
  34 #include "ci/ciInlineKlass.hpp"
  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "gc/shared/c1/barrierSetC1.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_x86.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52   LIR_Opr res = result();
  53 
  54   if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {

 100 #else
 101     case floatTag:   opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
 102     case doubleTag:  opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr;  break;
 103 #endif // _LP64
 104     case addressTag:
 105     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
 106   }
 107 
 108   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
 109   return opr;
 110 }
 111 
 112 
 113 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 114   LIR_Opr reg = new_register(T_INT);
 115   set_vreg_flag(reg, LIRGenerator::byte_reg);
 116   return reg;
 117 }
 118 
 119 
 120 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 121   // We just need one 32-bit temp register for x86/x64, to check whether both
 122   // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
 123   // @temp = %r10d
 124   // mov $0x405, %r10d
 125   // and (%left), %r10d   /* if need to check left */
 126   // and (%right), %r10d  /* if need to check right */
  127   // cmp $0x405, %r10d
 128   // jne L_oops_not_equal
 129   tmp1 = new_register(T_INT);
 130   tmp2 = LIR_OprFact::illegalOpr;
 131 }
 132 
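      // Illustrative sketch of what the sequence above computes (the names are
      // made up; 0x405 is the always_locked_pattern value quoted above):
      //   uint32_t bits = 0x405;
      //   if (need_check_left)  bits &= left_mark_word;
      //   if (need_check_right) bits &= right_mark_word;
      //   if (bits != 0x405) goto L_oops_not_equal;  // some pattern bit was cleared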
 133 //--------- loading items into registers --------------------------------
 134 
 135 
 136 // i486 instructions can inline constants
 137 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 138   if (type == T_SHORT || type == T_CHAR) {
  139     // there is no immediate move of word values in assembler_i486.?pp
 140     return false;
 141   }
 142   Constant* c = v->as_Constant();
 143   if (c && c->state_before() == NULL) {
 144     // constants of any type can be stored directly, except for
 145     // unloaded object constants.
 146     return true;
 147   }
 148   return false;
 149 }
 150 
 151 
 152 bool LIRGenerator::can_inline_as_constant(Value v) const {

 285 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 286   LIR_Opr tmp1 = new_register(objectType);
 287   LIR_Opr tmp2 = new_register(objectType);
 288   LIR_Opr tmp3 = new_register(objectType);
 289   __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 290 }
 291 
 292 //----------------------------------------------------------------------
 293 //             visitor functions
 294 //----------------------------------------------------------------------
 295 
 296 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 297   assert(x->is_pinned(),"");
 298   LIRItem obj(x->obj(), this);
 299   obj.load_item();
 300 
 301   set_no_result(x);
 302 
 303   // "lock" stores the address of the monitor stack slot, so this is not an oop
 304   LIR_Opr lock = new_register(T_INT);
 305   // Need a scratch register for inline types on x86
 306   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 307   if (EnableValhalla && x->maybe_inlinetype()) {
 308     scratch = new_register(T_INT);
 309   }
 310 
 311   CodeEmitInfo* info_for_exception = NULL;
 312   if (x->needs_null_check()) {
 313     info_for_exception = state_for(x);
 314   }
 315 
 316   CodeStub* throw_imse_stub = x->maybe_inlinetype() ?
 317       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id,
 318                               LIR_OprFact::illegalOpr, state_for(x))
 319     : NULL;
 320 
 321   // this CodeEmitInfo must not have the xhandlers because here the
 322   // object is already locked (xhandlers expect object to be unlocked)
 323   CodeEmitInfo* info = state_for(x, x->state(), true);
 324   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 325                 x->monitor_no(), info_for_exception, info, throw_imse_stub);
 326 }
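      // Note on the inline-type handling above (a sketch of the intended behavior,
      // not quoted from the back end): when throw_imse_stub is non-NULL,
      // monitor_enter is expected to test the object's mark word against the
      // always-locked pattern (0x405, see init_temps_for_substitutability_check)
      // and branch to the stub, which raises IllegalMonitorStateException, because
      // an inline type can never be used as a monitor. The extra scratch register
      // requested for x86 gives that test a temporary for the loaded mark word.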
 327 
 328 
 329 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 330   assert(x->is_pinned(),"");
 331 
 332   LIRItem obj(x->obj(), this);
 333   obj.dont_load_item();
 334 
 335   LIR_Opr lock = new_register(T_INT);
 336   LIR_Opr obj_temp = new_register(T_INT);
 337   set_no_result(x);
 338   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 339 }
 340 
 341 
 342 // _ineg, _lneg, _fneg, _dneg
 343 void LIRGenerator::do_NegateOp(NegateOp* x) {
 344   LIRItem value(x->x(), this);
 345   value.set_destroys_register();

1267   }
1268 
1269   __ convert(x->op(), conv_input, conv_result, stub);
1270 
1271   if (result != conv_result) {
1272     __ move(conv_result, result);
1273   }
1274 
1275   assert(result->is_virtual(), "result must be virtual register");
1276   set_result(x, result);
1277 #endif // _LP64
1278 }
1279 
1280 
1281 void LIRGenerator::do_NewInstance(NewInstance* x) {
1282   print_if_not_loaded(x);
1283 
1284   CodeEmitInfo* info = state_for(x, x->state());
1285   LIR_Opr reg = result_register_for(x->type());
1286   new_instance(reg, x->klass(), x->is_unresolved(),
1287                /* allow_inline */ false,
1288                FrameMap::rcx_oop_opr,
1289                FrameMap::rdi_oop_opr,
1290                FrameMap::rsi_oop_opr,
1291                LIR_OprFact::illegalOpr,
1292                FrameMap::rdx_metadata_opr, info);
1293   LIR_Opr result = rlock_result(x);
1294   __ move(reg, result);
1295 }
1296 
1297 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1298   // Mapping to do_NewInstance (same code) but use state_before for reexecution.
1299   CodeEmitInfo* info = state_for(x, x->state_before());
1300   x->set_to_object_type();
1301   LIR_Opr reg = result_register_for(x->type());
1302   new_instance(reg, x->klass(), false,
1303                /* allow_inline */ true,
1304                FrameMap::rcx_oop_opr,
1305                FrameMap::rdi_oop_opr,
1306                FrameMap::rsi_oop_opr,
1307                LIR_OprFact::illegalOpr,
1308                FrameMap::rdx_metadata_opr, info);
1309   LIR_Opr result = rlock_result(x);
1310   __ move(reg, result);
1311 }
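      // The notable differences from do_NewInstance above: the CodeEmitInfo is
      // built from x->state_before() so the allocation bytecode is re-executed
      // after a deoptimization, the klass is passed as already resolved, and
      // /* allow_inline */ is true, i.e. allocating an inline klass is permitted
      // (the exact semantics are defined by new_instance).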
1312 
1313 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1314   CodeEmitInfo* info = state_for(x, x->state());
1315 
1316   LIRItem length(x->length(), this);
1317   length.load_item_force(FrameMap::rbx_opr);
1318 
1319   LIR_Opr reg = result_register_for(x->type());
1320   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1321   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1322   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1323   LIR_Opr tmp4 = reg;
1324   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1325   LIR_Opr len = length.result();
1326   BasicType elem_type = x->elt_type();
1327 
1328   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1329 
1330   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1331   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

1339   LIRItem length(x->length(), this);
1340   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1341   // and therefore provide the state before the parameters have been consumed
1342   CodeEmitInfo* patching_info = NULL;
1343   if (!x->klass()->is_loaded() || PatchALot) {
1344     patching_info =  state_for(x, x->state_before());
1345   }
1346 
1347   CodeEmitInfo* info = state_for(x, x->state());
1348 
1349   const LIR_Opr reg = result_register_for(x->type());
1350   LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
1351   LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
1352   LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
1353   LIR_Opr tmp4 = reg;
1354   LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
1355 
1356   length.load_item_force(FrameMap::rbx_opr);
1357   LIR_Opr len = length.result();
1358 
1359   ciKlass* obj = (ciKlass*) x->exact_type();
1360   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1361   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1362     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1363   }
1364   klass2reg_with_patching(klass_reg, obj, patching_info);
1365   if (x->is_null_free()) {
1366     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_INLINE_TYPE, klass_reg, slow_path);
1367   } else {
1368     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1369   }
1370 
1371   LIR_Opr result = rlock_result(x);
1372   __ move(reg, result);
1373 }
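      // For the null-free case the element type handed to allocate_array is
      // T_INLINE_TYPE rather than T_OBJECT, and the slow-path stub is created with
      // x->is_null_free() as well, so both the fast and the slow allocation path
      // see that the elements are null-restricted.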
1374 
1375 
1376 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1377   Values* dims = x->dims();
1378   int i = dims->length();
1379   LIRItemList* items = new LIRItemList(i, i, NULL);
1380   while (i-- > 0) {
1381     LIRItem* size = new LIRItem(dims->at(i), this);
1382     items->at_put(i, size);
1383   }
1384 
1385   // Evaluate state_for early since it may emit code.
1386   CodeEmitInfo* patching_info = NULL;
1387   if (!x->klass()->is_loaded() || PatchALot) {
1388     patching_info = state_for(x, x->state_before());
1389 

1428   // nothing to do for now
1429 }
1430 
1431 
1432 void LIRGenerator::do_CheckCast(CheckCast* x) {
1433   LIRItem obj(x->obj(), this);
1434 
1435   CodeEmitInfo* patching_info = NULL;
1436   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1437     // must do this before locking the destination register as an oop register,
1438     // and before the obj is loaded (the latter is for deoptimization)
1439     patching_info = state_for(x, x->state_before());
1440   }
1441   obj.load_item();
1442 
1443   // info for exceptions
1444   CodeEmitInfo* info_for_exception =
1445       (x->needs_exception_state() ? state_for(x) :
1446                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1447 
1448   if (x->is_null_free()) {
1449     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1450   }
1451 
1452   CodeStub* stub;
1453   if (x->is_incompatible_class_change_check()) {
1454     assert(patching_info == NULL, "can't patch this");
1455     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1456   } else if (x->is_invokespecial_receiver_check()) {
1457     assert(patching_info == NULL, "can't patch this");
1458     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
1459   } else {
1460     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1461   }
1462   LIR_Opr reg = rlock_result(x);
1463   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1464   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1465     tmp3 = new_register(objectType);
1466   }
1467   __ checkcast(reg, obj.result(), x->klass(),
1468                new_register(objectType), new_register(objectType), tmp3,
1469                x->direct_compare(), info_for_exception, patching_info, stub,
1470                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1471 }
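      // Note on the null-free path above: when the checkcast target is null-free,
      // an explicit null check is emitted before the type check, since null can
      // never be a legal value of a null-restricted type; the CodeEmitInfo is
      // copied from info_for_exception so the null check gets its own debug info.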
1472 
1473 
1474 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1475   LIRItem obj(x->obj(), this);
1476 
1477   // result and test object may not be in same register
1478   LIR_Opr reg = rlock_result(x);
1479   CodeEmitInfo* patching_info = NULL;
1480   if ((!x->klass()->is_loaded() || PatchALot)) {
1481     // must do this before locking the destination register as an oop register
1482     patching_info = state_for(x, x->state_before());
1483   }
1484   obj.load_item();
1485   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1486   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1487     tmp3 = new_register(objectType);
1488   }
1489   __ instanceof(reg, obj.result(), x->klass(),
1490                 new_register(objectType), new_register(objectType), tmp3,

1501 
1502   LIRItem xitem(x->x(), this);
1503   LIRItem yitem(x->y(), this);
1504   LIRItem* xin = &xitem;
1505   LIRItem* yin = &yitem;
1506 
1507   if (tag == longTag) {
1508     // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1509     // mirror for other conditions
1510     if (cond == If::gtr || cond == If::leq) {
1511       cond = Instruction::mirror(cond);
1512       xin = &yitem;
1513       yin = &xitem;
1514     }
1515     xin->set_destroys_register();
1516   }
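        // Illustration of the mirroring above: the back end only handles
        // eql/neq/lss/geq for longs, so a comparison like "x > y" (If::gtr) is
        // rewritten as "y < x" (If::lss with the operands swapped) and
        // "x <= y" (If::leq) becomes "y >= x" (If::geq).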
1517   xin->load_item();
1518   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1519     // inline long zero
1520     yin->dont_load_item();
1521   } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
 1523     // longs cannot handle constants on the right-hand side
1523     yin->load_item();
1524   } else {
1525     yin->dont_load_item();
1526   }
1527 
1528   LIR_Opr left = xin->result();
1529   LIR_Opr right = yin->result();
1530 
1531   set_no_result(x);
1532 
1533   // add safepoint before generating condition code so it can be recomputed
1534   if (x->is_safepoint()) {
1535     // increment backedge counter if needed
1536     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1537         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1538     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1539   }
1540 
1541   if (x->substitutability_check()) {
1542     substitutability_check(x, *xin, *yin);
1543   } else {
1544     __ cmp(lir_cond(cond), left, right);
1545   }
1546   // Generate branch profiling. Profiling code doesn't kill flags.
1547   profile_branch(x, cond);
1548   move_to_phi(x->state());
1549   if (x->x()->type()->is_float_kind()) {
1550     __ branch(lir_cond(cond), x->tsux(), x->usux());
1551   } else {
1552     __ branch(lir_cond(cond), x->tsux());
1553   }
1554   assert(x->default_sux() == x->fsux(), "wrong destination above");
1555   __ jump(x->default_sux());
1556 }
1557 
1558 
1559 LIR_Opr LIRGenerator::getThreadPointer() {
1560 #ifdef _LP64
1561   return FrameMap::as_pointer_opr(r15_thread);
1562 #else
1563   LIR_Opr result = new_register(T_INT);
1564   __ get_thread(result);
1565   return result;