src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"

  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52 }
  53 
  54 
  55 void LIRItem::load_nonconstant() {

  85     case longTag:    opr = FrameMap::long0_opr;        break;
  86     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  87     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  88 
  89     case addressTag:
  90     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  91   }
  92 
  93   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  94   return opr;
  95 }
  96 
  97 
  98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  99   LIR_Opr reg = new_register(T_INT);
 100   set_vreg_flag(reg, LIRGenerator::byte_reg);
 101   return reg;
 102 }
 103 
 104 






 105 //--------- loading items into registers --------------------------------
 106 
 107 
 108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 109   if (v->type()->as_IntConstant() != NULL) {
 110     return v->type()->as_IntConstant()->value() == 0L;
 111   } else if (v->type()->as_LongConstant() != NULL) {
 112     return v->type()->as_LongConstant()->value() == 0L;
 113   } else if (v->type()->as_ObjectConstant() != NULL) {
 114     return v->type()->as_ObjectConstant()->value()->is_null_object();
 115   } else {
 116     return false;
 117   }
 118 }
 119 
 120 bool LIRGenerator::can_inline_as_constant(Value v) const {
 121   // FIXME: Just a guess
 122   if (v->type()->as_IntConstant() != NULL) {
 123     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 124   } else if (v->type()->as_LongConstant() != NULL) {

 313 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 314     LIR_Opr tmp1 = new_register(objectType);
 315     LIR_Opr tmp2 = new_register(objectType);
 316     LIR_Opr tmp3 = new_register(objectType);
 317     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 318 }
 319 
 320 //----------------------------------------------------------------------
 321 //             visitor functions
 322 //----------------------------------------------------------------------
 323 
 324 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 325   assert(x->is_pinned(),"");
 326   LIRItem obj(x->obj(), this);
 327   obj.load_item();
 328 
 329   set_no_result(x);
 330 
 331   // "lock" stores the address of the monitor stack slot, so this is not an oop
 332   LIR_Opr lock = new_register(T_INT);





 333 
 334   CodeEmitInfo* info_for_exception = NULL;
 335   if (x->needs_null_check()) {
 336     info_for_exception = state_for(x);
 337   }






 338   // this CodeEmitInfo must not have the xhandlers because here the
 339   // object is already locked (xhandlers expect object to be unlocked)
 340   CodeEmitInfo* info = state_for(x, x->state(), true);
 341   monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
 342                         x->monitor_no(), info_for_exception, info);
 343 }
 344 
 345 
 346 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 347   assert(x->is_pinned(),"");
 348 
 349   LIRItem obj(x->obj(), this);
 350   obj.dont_load_item();
 351 
 352   LIR_Opr lock = new_register(T_INT);
 353   LIR_Opr obj_temp = new_register(T_INT);
 354   set_no_result(x);
 355   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 356 }
 357 
 358 
 359 void LIRGenerator::do_NegateOp(NegateOp* x) {
 360 
 361   LIRItem from(x->x(), this);
 362   from.load_item();

1111 
1112   // arguments of lir_convert
1113   LIR_Opr conv_input = input;
1114   LIR_Opr conv_result = result;
1115 
1116   __ convert(x->op(), conv_input, conv_result);
1117 
1118   assert(result->is_virtual(), "result must be virtual register");
1119   set_result(x, result);
1120 }
1121 
1122 void LIRGenerator::do_NewInstance(NewInstance* x) {
1123 #ifndef PRODUCT
1124   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1125     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1126   }
1127 #endif
1128   CodeEmitInfo* info = state_for(x, x->state());
1129   LIR_Opr reg = result_register_for(x->type());
1130   new_instance(reg, x->klass(), x->is_unresolved(),
1131                        FrameMap::r10_oop_opr,
1132                        FrameMap::r11_oop_opr,
1133                        FrameMap::r4_oop_opr,
1134                        LIR_OprFact::illegalOpr,
1135                        FrameMap::r3_metadata_opr, info);

















1136   LIR_Opr result = rlock_result(x);
1137   __ move(reg, result);

1138 }
1139 
1140 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1141   CodeEmitInfo* info = state_for(x, x->state());
1142 
1143   LIRItem length(x->length(), this);
1144   length.load_item_force(FrameMap::r19_opr);
1145 
1146   LIR_Opr reg = result_register_for(x->type());
1147   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1148   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1149   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1150   LIR_Opr tmp4 = reg;
1151   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1152   LIR_Opr len = length.result();
1153   BasicType elem_type = x->elt_type();
1154 
1155   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1156 
1157   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);

1165   LIRItem length(x->length(), this);
1166   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1167   // and therefore provide the state before the parameters have been consumed
1168   CodeEmitInfo* patching_info = NULL;
1169   if (!x->klass()->is_loaded() || PatchALot) {
1170     patching_info =  state_for(x, x->state_before());
1171   }
1172 
1173   CodeEmitInfo* info = state_for(x, x->state());
1174 
1175   LIR_Opr reg = result_register_for(x->type());
1176   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1177   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1178   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1179   LIR_Opr tmp4 = reg;
1180   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1181 
1182   length.load_item_force(FrameMap::r19_opr);
1183   LIR_Opr len = length.result();
1184 
1185   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1186   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1187   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1188     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1189   }

1190   klass2reg_with_patching(klass_reg, obj, patching_info);
1191   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);




1192 
1193   LIR_Opr result = rlock_result(x);
1194   __ move(reg, result);
1195 }
1196 
1197 
1198 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1199   Values* dims = x->dims();
1200   int i = dims->length();
1201   LIRItemList* items = new LIRItemList(i, i, NULL);
1202   while (i-- > 0) {
1203     LIRItem* size = new LIRItem(dims->at(i), this);
1204     items->at_put(i, size);
1205   }
1206 
1207   // Evaluate state_for early since it may emit code.
1208   CodeEmitInfo* patching_info = NULL;
1209   if (!x->klass()->is_loaded() || PatchALot) {
1210     patching_info = state_for(x, x->state_before());
1211 

1247 
1248 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1249   // nothing to do for now
1250 }
1251 
1252 void LIRGenerator::do_CheckCast(CheckCast* x) {
1253   LIRItem obj(x->obj(), this);
1254 
1255   CodeEmitInfo* patching_info = NULL;
1256   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1257     // must do this before locking the destination register as an oop register,
1258     // and before the obj is loaded (the latter is for deoptimization)
1259     patching_info = state_for(x, x->state_before());
1260   }
1261   obj.load_item();
1262 
1263   // info for exceptions
1264   CodeEmitInfo* info_for_exception =
1265       (x->needs_exception_state() ? state_for(x) :
1266                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));



1267 
1268   CodeStub* stub;
1269   if (x->is_incompatible_class_change_check()) {
1270     assert(patching_info == NULL, "can't patch this");
1271     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1272   } else if (x->is_invokespecial_receiver_check()) {
1273     assert(patching_info == NULL, "can't patch this");
1274     stub = new DeoptimizeStub(info_for_exception,
1275                               Deoptimization::Reason_class_check,
1276                               Deoptimization::Action_none);
1277   } else {
1278     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1279   }
1280   LIR_Opr reg = rlock_result(x);
1281   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1282   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1283     tmp3 = new_register(objectType);
1284   }


1285   __ checkcast(reg, obj.result(), x->klass(),
1286                new_register(objectType), new_register(objectType), tmp3,
1287                x->direct_compare(), info_for_exception, patching_info, stub,
1288                x->profiled_method(), x->profiled_bci());

1289 }
1290 
1291 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1292   LIRItem obj(x->obj(), this);
1293 
1294   // result and test object may not be in same register
1295   LIR_Opr reg = rlock_result(x);
1296   CodeEmitInfo* patching_info = NULL;
1297   if ((!x->klass()->is_loaded() || PatchALot)) {
1298     // must do this before locking the destination register as an oop register
1299     patching_info = state_for(x, x->state_before());
1300   }
1301   obj.load_item();
1302   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1303   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1304     tmp3 = new_register(objectType);
1305   }
1306   __ instanceof(reg, obj.result(), x->klass(),
1307                 new_register(objectType), new_register(objectType), tmp3,
1308                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1346     } else {
1347       yin->load_item();
1348     }
1349   } else {
1350     yin->load_item();
1351   }
1352 
1353   set_no_result(x);
1354 
1355   LIR_Opr left = xin->result();
1356   LIR_Opr right = yin->result();
1357 
1358   // add safepoint before generating condition code so it can be recomputed
1359   if (x->is_safepoint()) {
1360     // increment backedge counter if needed
1361     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1362         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1363     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1364   }
1365 
1366   __ cmp(lir_cond(cond), left, right);





1367   // Generate branch profiling. Profiling code doesn't kill flags.
1368   profile_branch(x, cond);
1369   move_to_phi(x->state());
1370   if (x->x()->type()->is_float_kind()) {
1371     __ branch(lir_cond(cond), x->tsux(), x->usux());
1372   } else {
1373     __ branch(lir_cond(cond), x->tsux());
1374   }
1375   assert(x->default_sux() == x->fsux(), "wrong destination above");
1376   __ jump(x->default_sux());
1377 }
1378 
1379 LIR_Opr LIRGenerator::getThreadPointer() {
1380    return FrameMap::as_pointer_opr(rthread);
1381 }
1382 
1383 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1384 
1385 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1386                                         CodeEmitInfo* info) {

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciInlineKlass.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciTypeArrayKlass.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 #include "vmreg_aarch64.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 // Item will be loaded into a byte register; Intel only
  51 void LIRItem::load_byte_item() {
  52   load_item();
  53 }
  54 
  55 
  56 void LIRItem::load_nonconstant() {

  86     case longTag:    opr = FrameMap::long0_opr;        break;
  87     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  88     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  89 
  90     case addressTag:
  91     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  92   }
  93 
  94   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  95   return opr;
  96 }
  97 
  98 
  99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 100   LIR_Opr reg = new_register(T_INT);
 101   set_vreg_flag(reg, LIRGenerator::byte_reg);
 102   return reg;
 103 }
 104 
 105 
 106 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 107   tmp1 = new_register(T_INT);
 108   tmp2 = LIR_OprFact::illegalOpr;
 109 }
 110 
 111 
 112 //--------- loading items into registers --------------------------------
 113 
 114 
 115 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 116   if (v->type()->as_IntConstant() != NULL) {
 117     return v->type()->as_IntConstant()->value() == 0L;
 118   } else if (v->type()->as_LongConstant() != NULL) {
 119     return v->type()->as_LongConstant()->value() == 0L;
 120   } else if (v->type()->as_ObjectConstant() != NULL) {
 121     return v->type()->as_ObjectConstant()->value()->is_null_object();
 122   } else {
 123     return false;
 124   }
 125 }
 126 
 127 bool LIRGenerator::can_inline_as_constant(Value v) const {
 128   // FIXME: Just a guess
 129   if (v->type()->as_IntConstant() != NULL) {
 130     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 131   } else if (v->type()->as_LongConstant() != NULL) {

 320 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 321     LIR_Opr tmp1 = new_register(objectType);
 322     LIR_Opr tmp2 = new_register(objectType);
 323     LIR_Opr tmp3 = new_register(objectType);
 324     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 325 }
 326 
 327 //----------------------------------------------------------------------
 328 //             visitor functions
 329 //----------------------------------------------------------------------
 330 
 331 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 332   assert(x->is_pinned(),"");
 333   LIRItem obj(x->obj(), this);
 334   obj.load_item();
 335 
 336   set_no_result(x);
 337 
 338   // "lock" stores the address of the monitor stack slot, so this is not an oop
 339   LIR_Opr lock = new_register(T_INT);
 340   // Need a scratch register for inline type
 341   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 342   if (EnableValhalla && x->maybe_inlinetype()) {
 343     scratch = new_register(T_INT);
 344   }
 345 
 346   CodeEmitInfo* info_for_exception = NULL;
 347   if (x->needs_null_check()) {
 348     info_for_exception = state_for(x);
 349   }
 350 
 351   CodeStub* throw_imse_stub =
 352       x->maybe_inlinetype() ?
 353       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
 354       NULL;
 355 
 356   // this CodeEmitInfo must not have the xhandlers because here the
 357   // object is already locked (xhandlers expect object to be unlocked)
 358   CodeEmitInfo* info = state_for(x, x->state(), true);
 359   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 360                         x->monitor_no(), info_for_exception, info, throw_imse_stub);
 361 }
 362 
 363 
 364 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 365   assert(x->is_pinned(),"");
 366 
 367   LIRItem obj(x->obj(), this);
 368   obj.dont_load_item();
 369 
 370   LIR_Opr lock = new_register(T_INT);
 371   LIR_Opr obj_temp = new_register(T_INT);
 372   set_no_result(x);
 373   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 374 }
 375 
 376 
 377 void LIRGenerator::do_NegateOp(NegateOp* x) {
 378 
 379   LIRItem from(x->x(), this);
 380   from.load_item();

1129 
1130   // arguments of lir_convert
1131   LIR_Opr conv_input = input;
1132   LIR_Opr conv_result = result;
1133 
1134   __ convert(x->op(), conv_input, conv_result);
1135 
1136   assert(result->is_virtual(), "result must be virtual register");
1137   set_result(x, result);
1138 }
1139 
1140 void LIRGenerator::do_NewInstance(NewInstance* x) {
1141 #ifndef PRODUCT
1142   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1143     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1144   }
1145 #endif
1146   CodeEmitInfo* info = state_for(x, x->state());
1147   LIR_Opr reg = result_register_for(x->type());
1148   new_instance(reg, x->klass(), x->is_unresolved(),
1149                /* allow_inline */ false,
1150                FrameMap::r10_oop_opr,
1151                FrameMap::r11_oop_opr,
1152                FrameMap::r4_oop_opr,
1153                LIR_OprFact::illegalOpr,
1154                FrameMap::r3_metadata_opr, info);
1155   LIR_Opr result = rlock_result(x);
1156   __ move(reg, result);
1157 }
1158 
1159 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1160   // Mapping to do_NewInstance (same code) but use state_before for reexecution.
1161   CodeEmitInfo* info = state_for(x, x->state_before());
1162   x->set_to_object_type();
1163   LIR_Opr reg = result_register_for(x->type());
1164   new_instance(reg, x->klass(), false,
1165                /* allow_inline */ true,
1166                FrameMap::r10_oop_opr,
1167                FrameMap::r11_oop_opr,
1168                FrameMap::r4_oop_opr,
1169                LIR_OprFact::illegalOpr,
1170                FrameMap::r3_metadata_opr, info);
1171   LIR_Opr result = rlock_result(x);
1172   __ move(reg, result);
1173 
1174 }
1175 
1176 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1177   CodeEmitInfo* info = state_for(x, x->state());
1178 
1179   LIRItem length(x->length(), this);
1180   length.load_item_force(FrameMap::r19_opr);
1181 
1182   LIR_Opr reg = result_register_for(x->type());
1183   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1184   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1185   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1186   LIR_Opr tmp4 = reg;
1187   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1188   LIR_Opr len = length.result();
1189   BasicType elem_type = x->elt_type();
1190 
1191   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1192 
1193   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);

1201   LIRItem length(x->length(), this);
1202   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1203   // and therefore provide the state before the parameters have been consumed
1204   CodeEmitInfo* patching_info = NULL;
1205   if (!x->klass()->is_loaded() || PatchALot) {
1206     patching_info =  state_for(x, x->state_before());
1207   }
1208 
1209   CodeEmitInfo* info = state_for(x, x->state());
1210 
1211   LIR_Opr reg = result_register_for(x->type());
1212   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1213   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1214   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1215   LIR_Opr tmp4 = reg;
1216   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1217 
1218   length.load_item_force(FrameMap::r19_opr);
1219   LIR_Opr len = length.result();
1220 
1221   ciKlass* obj = (ciKlass*) x->exact_type();
1222   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1223   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1224     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1225   }
1226 
1227   klass2reg_with_patching(klass_reg, obj, patching_info);
1228   if (x->is_null_free()) {
1229     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_INLINE_TYPE, klass_reg, slow_path);
1230   } else {
1231     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1232   }
1233 
1234   LIR_Opr result = rlock_result(x);
1235   __ move(reg, result);
1236 }
1237 
1238 
1239 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1240   Values* dims = x->dims();
1241   int i = dims->length();
1242   LIRItemList* items = new LIRItemList(i, i, NULL);
1243   while (i-- > 0) {
1244     LIRItem* size = new LIRItem(dims->at(i), this);
1245     items->at_put(i, size);
1246   }
1247 
1248   // Evaluate state_for early since it may emit code.
1249   CodeEmitInfo* patching_info = NULL;
1250   if (!x->klass()->is_loaded() || PatchALot) {
1251     patching_info = state_for(x, x->state_before());
1252 

1288 
1289 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1290   // nothing to do for now
1291 }
1292 
1293 void LIRGenerator::do_CheckCast(CheckCast* x) {
1294   LIRItem obj(x->obj(), this);
1295 
1296   CodeEmitInfo* patching_info = NULL;
1297   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1298     // must do this before locking the destination register as an oop register,
1299     // and before the obj is loaded (the latter is for deoptimization)
1300     patching_info = state_for(x, x->state_before());
1301   }
1302   obj.load_item();
1303 
1304   // info for exceptions
1305   CodeEmitInfo* info_for_exception =
1306       (x->needs_exception_state() ? state_for(x) :
1307                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1308   if (x->is_null_free()) {
1309     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1310   }
1311 
1312   CodeStub* stub;
1313   if (x->is_incompatible_class_change_check()) {
1314     assert(patching_info == NULL, "can't patch this");
1315     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1316   } else if (x->is_invokespecial_receiver_check()) {
1317     assert(patching_info == NULL, "can't patch this");
1318     stub = new DeoptimizeStub(info_for_exception,
1319                               Deoptimization::Reason_class_check,
1320                               Deoptimization::Action_none);
1321   } else {
1322     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1323   }
1324   LIR_Opr reg = rlock_result(x);
1325   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1326   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1327     tmp3 = new_register(objectType);
1328   }
1329 
1330 
1331   __ checkcast(reg, obj.result(), x->klass(),
1332                new_register(objectType), new_register(objectType), tmp3,
1333                x->direct_compare(), info_for_exception, patching_info, stub,
1334                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1335 
1336 }
1337 
1338 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1339   LIRItem obj(x->obj(), this);
1340 
1341   // result and test object may not be in same register
1342   LIR_Opr reg = rlock_result(x);
1343   CodeEmitInfo* patching_info = NULL;
1344   if ((!x->klass()->is_loaded() || PatchALot)) {
1345     // must do this before locking the destination register as an oop register
1346     patching_info = state_for(x, x->state_before());
1347   }
1348   obj.load_item();
1349   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1350   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1351     tmp3 = new_register(objectType);
1352   }
1353   __ instanceof(reg, obj.result(), x->klass(),
1354                 new_register(objectType), new_register(objectType), tmp3,
1355                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1393     } else {
1394       yin->load_item();
1395     }
1396   } else {
1397     yin->load_item();
1398   }
1399 
1400   set_no_result(x);
1401 
1402   LIR_Opr left = xin->result();
1403   LIR_Opr right = yin->result();
1404 
1405   // add safepoint before generating condition code so it can be recomputed
1406   if (x->is_safepoint()) {
1407     // increment backedge counter if needed
1408     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1409         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1410     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1411   }
1412 
1413   if (x->substitutability_check()) {
1414     substitutability_check(x, *xin, *yin);
1415   } else {
1416     __ cmp(lir_cond(cond), left, right);
1417   }
1418 
1419   // Generate branch profiling. Profiling code doesn't kill flags.
1420   profile_branch(x, cond);
1421   move_to_phi(x->state());
1422   if (x->x()->type()->is_float_kind()) {
1423     __ branch(lir_cond(cond), x->tsux(), x->usux());
1424   } else {
1425     __ branch(lir_cond(cond), x->tsux());
1426   }
1427   assert(x->default_sux() == x->fsux(), "wrong destination above");
1428   __ jump(x->default_sux());
1429 }
1430 
1431 LIR_Opr LIRGenerator::getThreadPointer() {
1432    return FrameMap::as_pointer_opr(rthread);
1433 }
1434 
1435 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1436 
1437 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1438                                         CodeEmitInfo* info) {