
src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp


  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"

  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52 }
  53 
  54 
  55 void LIRItem::load_nonconstant() {

  85     case longTag:    opr = FrameMap::long0_opr;        break;
  86     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  87     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  88 
  89     case addressTag:
  90     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  91   }
  92 
  93   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  94   return opr;
  95 }
  96 
  97 
  98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  99   LIR_Opr reg = new_register(T_INT);
 100   set_vreg_flag(reg, LIRGenerator::byte_reg);
 101   return reg;
 102 }
 103 
 104 






 105 //--------- loading items into registers --------------------------------
 106 
 107 
 108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 109   if (v->type()->as_IntConstant() != NULL) {
 110     return v->type()->as_IntConstant()->value() == 0L;
 111   } else if (v->type()->as_LongConstant() != NULL) {
 112     return v->type()->as_LongConstant()->value() == 0L;
 113   } else if (v->type()->as_ObjectConstant() != NULL) {
 114     return v->type()->as_ObjectConstant()->value()->is_null_object();
 115   } else {
 116     return false;
 117   }
 118 }
 119 
 120 bool LIRGenerator::can_inline_as_constant(Value v) const {
 121   // FIXME: Just a guess
 122   if (v->type()->as_IntConstant() != NULL) {
 123     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 124   } else if (v->type()->as_LongConstant() != NULL) {

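
can_inline_as_constant() above only accepts an int or long constant that Assembler::operand_valid_for_add_sub_immediate() can encode, i.e. one that fits an AArch64 ADD/SUB immediate. A minimal standalone sketch of that check (a simplification under assumed encoding rules; the helper name is invented and the real test lives in the AArch64 assembler):

    #include <cstdint>

    // ADD/SUB (immediate) encodes a 12-bit unsigned value, optionally
    // shifted left by 12 bits; a negative constant can be absorbed by
    // flipping ADD <-> SUB, so only the magnitude has to fit.
    static bool fits_add_sub_immediate(int64_t v) {
      uint64_t u = (v < 0) ? 0 - (uint64_t)v : (uint64_t)v;
      return (u & ~UINT64_C(0xfff)) == 0 ||          // plain imm12
             (u & ~(UINT64_C(0xfff) << 12)) == 0;    // imm12, LSL #12
    }
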
 314 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 315     LIR_Opr tmp1 = new_register(objectType);
 316     LIR_Opr tmp2 = new_register(objectType);
 317     LIR_Opr tmp3 = new_register(objectType);
 318     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 319 }
 320 
 321 //----------------------------------------------------------------------
 322 //             visitor functions
 323 //----------------------------------------------------------------------
 324 
 325 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 326   assert(x->is_pinned(),"");
 327   LIRItem obj(x->obj(), this);
 328   obj.load_item();
 329 
 330   set_no_result(x);
 331 
 332   // "lock" stores the address of the monitor stack slot, so this is not an oop
 333   LIR_Opr lock = new_register(T_INT);





 334 
 335   CodeEmitInfo* info_for_exception = NULL;
 336   if (x->needs_null_check()) {
 337     info_for_exception = state_for(x);
 338   }






 339   // this CodeEmitInfo must not have the xhandlers because here the
 340   // object is already locked (xhandlers expect object to be unlocked)
 341   CodeEmitInfo* info = state_for(x, x->state(), true);
 342   monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
 343                         x->monitor_no(), info_for_exception, info);
 344 }
 345 
 346 
 347 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 348   assert(x->is_pinned(),"");
 349 
 350   LIRItem obj(x->obj(), this);
 351   obj.dont_load_item();
 352 
 353   LIR_Opr lock = new_register(T_INT);
 354   LIR_Opr obj_temp = new_register(T_INT);
 355   set_no_result(x);
 356   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 357 }
 358 
 359 
 360 void LIRGenerator::do_NegateOp(NegateOp* x) {
 361 
 362   LIRItem from(x->x(), this);
 363   from.load_item();

1110 
1111   // arguments of lir_convert
1112   LIR_Opr conv_input = input;
1113   LIR_Opr conv_result = result;
1114 
1115   __ convert(x->op(), conv_input, conv_result);
1116 
1117   assert(result->is_virtual(), "result must be virtual register");
1118   set_result(x, result);
1119 }
1120 
1121 void LIRGenerator::do_NewInstance(NewInstance* x) {
1122 #ifndef PRODUCT
1123   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1124     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1125   }
1126 #endif
1127   CodeEmitInfo* info = state_for(x, x->state());
1128   LIR_Opr reg = result_register_for(x->type());
1129   new_instance(reg, x->klass(), x->is_unresolved(),
1130                        FrameMap::r10_oop_opr,
1131                        FrameMap::r11_oop_opr,
1132                        FrameMap::r4_oop_opr,
1133                        LIR_OprFact::illegalOpr,
1134                        FrameMap::r3_metadata_opr, info);
1135   LIR_Opr result = rlock_result(x);
1136   __ move(reg, result);

1137 }
1138 
1139 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1140   CodeEmitInfo* info = state_for(x, x->state());
1141 
1142   LIRItem length(x->length(), this);
1143   length.load_item_force(FrameMap::r19_opr);
1144 
1145   LIR_Opr reg = result_register_for(x->type());
1146   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1147   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1148   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1149   LIR_Opr tmp4 = reg;
1150   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1151   LIR_Opr len = length.result();
1152   BasicType elem_type = x->elt_type();
1153 
1154   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1155 
1156   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);

1164   LIRItem length(x->length(), this);
1165   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1166   // and therefore provide the state before the parameters have been consumed
1167   CodeEmitInfo* patching_info = NULL;
1168   if (!x->klass()->is_loaded() || PatchALot) {
1169     patching_info =  state_for(x, x->state_before());
1170   }
1171 
1172   CodeEmitInfo* info = state_for(x, x->state());
1173 
1174   LIR_Opr reg = result_register_for(x->type());
1175   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1176   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1177   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1178   LIR_Opr tmp4 = reg;
1179   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1180 
1181   length.load_item_force(FrameMap::r19_opr);
1182   LIR_Opr len = length.result();
1183 
1184   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1185   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1186   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1187     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1188   }

1189   klass2reg_with_patching(klass_reg, obj, patching_info);
1190   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);




1191 
1192   LIR_Opr result = rlock_result(x);
1193   __ move(reg, result);
1194 }
1195 
1196 
1197 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1198   Values* dims = x->dims();
1199   int i = dims->length();
1200   LIRItemList* items = new LIRItemList(i, i, NULL);
1201   while (i-- > 0) {
1202     LIRItem* size = new LIRItem(dims->at(i), this);
1203     items->at_put(i, size);
1204   }
1205 
1206   // Evaluate state_for early since it may emit code.
1207   CodeEmitInfo* patching_info = NULL;
1208   if (!x->klass()->is_loaded() || PatchALot) {
1209     patching_info = state_for(x, x->state_before());
1210 

1246 
1247 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1248   // nothing to do for now
1249 }
1250 
1251 void LIRGenerator::do_CheckCast(CheckCast* x) {
1252   LIRItem obj(x->obj(), this);
1253 
1254   CodeEmitInfo* patching_info = NULL;
1255   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1256     // must do this before locking the destination register as an oop register,
1257     // and before the obj is loaded (the latter is for deoptimization)
1258     patching_info = state_for(x, x->state_before());
1259   }
1260   obj.load_item();
1261 
1262   // info for exceptions
1263   CodeEmitInfo* info_for_exception =
1264       (x->needs_exception_state() ? state_for(x) :
1265                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));



1266 
1267   CodeStub* stub;
1268   if (x->is_incompatible_class_change_check()) {
1269     assert(patching_info == NULL, "can't patch this");
1270     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1271   } else if (x->is_invokespecial_receiver_check()) {
1272     assert(patching_info == NULL, "can't patch this");
1273     stub = new DeoptimizeStub(info_for_exception,
1274                               Deoptimization::Reason_class_check,
1275                               Deoptimization::Action_none);
1276   } else {
1277     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1278   }
1279   LIR_Opr reg = rlock_result(x);
1280   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1281   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1282     tmp3 = new_register(objectType);
1283   }


1284   __ checkcast(reg, obj.result(), x->klass(),
1285                new_register(objectType), new_register(objectType), tmp3,
1286                x->direct_compare(), info_for_exception, patching_info, stub,
1287                x->profiled_method(), x->profiled_bci());

1288 }
1289 
1290 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1291   LIRItem obj(x->obj(), this);
1292 
1293   // result and test object may not be in same register
1294   LIR_Opr reg = rlock_result(x);
1295   CodeEmitInfo* patching_info = NULL;
1296   if ((!x->klass()->is_loaded() || PatchALot)) {
1297     // must do this before locking the destination register as an oop register
1298     patching_info = state_for(x, x->state_before());
1299   }
1300   obj.load_item();
1301   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1302   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1303     tmp3 = new_register(objectType);
1304   }
1305   __ instanceof(reg, obj.result(), x->klass(),
1306                 new_register(objectType), new_register(objectType), tmp3,
1307                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1345     } else {
1346       yin->load_item();
1347     }
1348   } else {
1349     yin->load_item();
1350   }
1351 
1352   set_no_result(x);
1353 
1354   LIR_Opr left = xin->result();
1355   LIR_Opr right = yin->result();
1356 
1357   // add safepoint before generating condition code so it can be recomputed
1358   if (x->is_safepoint()) {
1359     // increment backedge counter if needed
1360     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1361         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1362     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1363   }
1364 
1365   __ cmp(lir_cond(cond), left, right);





1366   // Generate branch profiling. Profiling code doesn't kill flags.
1367   profile_branch(x, cond);
1368   move_to_phi(x->state());
1369   if (x->x()->type()->is_float_kind()) {
1370     __ branch(lir_cond(cond), x->tsux(), x->usux());
1371   } else {
1372     __ branch(lir_cond(cond), x->tsux());
1373   }
1374   assert(x->default_sux() == x->fsux(), "wrong destination above");
1375   __ jump(x->default_sux());
1376 }
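
The float/double case above branches with two successors, tsux and usux, because a NaN operand leaves every ordered comparison false: the cmp sets the "unordered" condition and the generated code still has to go somewhere. A tiny standalone C++ illustration of why a plain taken/not-taken split is not enough (unrelated to LIR, just the IEEE comparison semantics):

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = std::nan(""), b = 1.0;
      // With a NaN operand both tests are false, which is why a float
      // if() needs an explicit unordered successor in the IR.
      std::printf("a <  b : %d\n", a < b);    // prints 0
      std::printf("a >= b : %d\n", a >= b);   // prints 0
      return 0;
    }
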
1377 
1378 LIR_Opr LIRGenerator::getThreadPointer() {
1379    return FrameMap::as_pointer_opr(rthread);
1380 }
1381 
1382 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1383 
1384 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1385                                         CodeEmitInfo* info) {

src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp  (new version; the baseline version is listed above)

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciInlineKlass.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciTypeArrayKlass.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 #include "vmreg_aarch64.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 // Item will be loaded into a byte register; Intel only
  51 void LIRItem::load_byte_item() {
  52   load_item();
  53 }
  54 
  55 
  56 void LIRItem::load_nonconstant() {

  86     case longTag:    opr = FrameMap::long0_opr;        break;
  87     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  88     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  89 
  90     case addressTag:
  91     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  92   }
  93 
  94   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  95   return opr;
  96 }
  97 
  98 
  99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 100   LIR_Opr reg = new_register(T_INT);
 101   set_vreg_flag(reg, LIRGenerator::byte_reg);
 102   return reg;
 103 }
 104 
 105 
 106 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 107   tmp1 = new_register(T_INT);
 108   tmp2 = LIR_OprFact::illegalOpr;
 109 }
 110 
 111 
 112 //--------- loading items into registers --------------------------------
 113 
 114 
 115 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 116   if (v->type()->as_IntConstant() != NULL) {
 117     return v->type()->as_IntConstant()->value() == 0L;
 118   } else if (v->type()->as_LongConstant() != NULL) {
 119     return v->type()->as_LongConstant()->value() == 0L;
 120   } else if (v->type()->as_ObjectConstant() != NULL) {
 121     return v->type()->as_ObjectConstant()->value()->is_null_object();
 122   } else {
 123     return false;
 124   }
 125 }
 126 
 127 bool LIRGenerator::can_inline_as_constant(Value v) const {
 128   // FIXME: Just a guess
 129   if (v->type()->as_IntConstant() != NULL) {
 130     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 131   } else if (v->type()->as_LongConstant() != NULL) {

 321 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 322     LIR_Opr tmp1 = new_register(objectType);
 323     LIR_Opr tmp2 = new_register(objectType);
 324     LIR_Opr tmp3 = new_register(objectType);
 325     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 326 }
 327 
 328 //----------------------------------------------------------------------
 329 //             visitor functions
 330 //----------------------------------------------------------------------
 331 
 332 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 333   assert(x->is_pinned(),"");
 334   LIRItem obj(x->obj(), this);
 335   obj.load_item();
 336 
 337   set_no_result(x);
 338 
 339   // "lock" stores the address of the monitor stack slot, so this is not an oop
 340   LIR_Opr lock = new_register(T_INT);
 341   // Need a scratch register for inline type
 342   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 343   if (EnableValhalla && x->maybe_inlinetype()) {
 344     scratch = new_register(T_INT);
 345   }
 346 
 347   CodeEmitInfo* info_for_exception = NULL;
 348   if (x->needs_null_check()) {
 349     info_for_exception = state_for(x);
 350   }
 351 
 352   CodeStub* throw_imse_stub =
 353       x->maybe_inlinetype() ?
 354       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
 355       NULL;
 356 
 357   // this CodeEmitInfo must not have the xhandlers because here the
 358   // object is already locked (xhandlers expect object to be unlocked)
 359   CodeEmitInfo* info = state_for(x, x->state(), true);
 360   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 361                         x->monitor_no(), info_for_exception, info, throw_imse_stub);
 362 }
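
When the operand may be an inline type (EnableValhalla && x->maybe_inlinetype()), do_MonitorEnter above reserves an extra scratch register and hands monitor_enter() a throw_imse_stub: inline types have no identity, so trying to lock one has to raise IllegalMonitorStateException instead of taking the locking fast path. A purely illustrative sketch of that control flow (the struct and helpers are invented, not HotSpot code):

    #include <stdexcept>

    struct Object { bool is_inline_type; };

    static void monitor_enter_checked(Object* obj) {
      if (obj->is_inline_type) {
        // corresponds to branching to throw_imse_stub in the generated code
        throw std::runtime_error("IllegalMonitorStateException");
      }
      // ... normal fast-path locking on the object's mark word ...
    }
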
 363 
 364 
 365 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 366   assert(x->is_pinned(),"");
 367 
 368   LIRItem obj(x->obj(), this);
 369   obj.dont_load_item();
 370 
 371   LIR_Opr lock = new_register(T_INT);
 372   LIR_Opr obj_temp = new_register(T_INT);
 373   set_no_result(x);
 374   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 375 }
 376 
 377 
 378 void LIRGenerator::do_NegateOp(NegateOp* x) {
 379 
 380   LIRItem from(x->x(), this);
 381   from.load_item();

1128 
1129   // arguments of lir_convert
1130   LIR_Opr conv_input = input;
1131   LIR_Opr conv_result = result;
1132 
1133   __ convert(x->op(), conv_input, conv_result);
1134 
1135   assert(result->is_virtual(), "result must be virtual register");
1136   set_result(x, result);
1137 }
1138 
1139 void LIRGenerator::do_NewInstance(NewInstance* x) {
1140 #ifndef PRODUCT
1141   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1142     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1143   }
1144 #endif
1145   CodeEmitInfo* info = state_for(x, x->state());
1146   LIR_Opr reg = result_register_for(x->type());
1147   new_instance(reg, x->klass(), x->is_unresolved(),
1148                /* allow_inline */ false,
1149                FrameMap::r10_oop_opr,
1150                FrameMap::r11_oop_opr,
1151                FrameMap::r4_oop_opr,
1152                LIR_OprFact::illegalOpr,
1153                FrameMap::r3_metadata_opr, info);
1154   LIR_Opr result = rlock_result(x);
1155   __ move(reg, result);
1156 }
1157 
1158 void LIRGenerator::do_NewInlineTypeInstance(NewInlineTypeInstance* x) {
1159   // Mapping to do_NewInstance (same code) but use state_before for reexecution.
1160   CodeEmitInfo* info = state_for(x, x->state_before());
1161   x->set_to_object_type();
1162   LIR_Opr reg = result_register_for(x->type());
1163   new_instance(reg, x->klass(), false,
1164                /* allow_inline */ true,
1165                FrameMap::r10_oop_opr,
1166                FrameMap::r11_oop_opr,
1167                FrameMap::r4_oop_opr,
1168                LIR_OprFact::illegalOpr,
1169                FrameMap::r3_metadata_opr, info);
1170   LIR_Opr result = rlock_result(x);
1171   __ move(reg, result);
1172 
1173 }
1174 
1175 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1176   CodeEmitInfo* info = state_for(x, x->state());
1177 
1178   LIRItem length(x->length(), this);
1179   length.load_item_force(FrameMap::r19_opr);
1180 
1181   LIR_Opr reg = result_register_for(x->type());
1182   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1183   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1184   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1185   LIR_Opr tmp4 = reg;
1186   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1187   LIR_Opr len = length.result();
1188   BasicType elem_type = x->elt_type();
1189 
1190   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1191 
1192   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);

1200   LIRItem length(x->length(), this);
1201   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1202   // and therefore provide the state before the parameters have been consumed
1203   CodeEmitInfo* patching_info = NULL;
1204   if (!x->klass()->is_loaded() || PatchALot) {
1205     patching_info =  state_for(x, x->state_before());
1206   }
1207 
1208   CodeEmitInfo* info = state_for(x, x->state());
1209 
1210   LIR_Opr reg = result_register_for(x->type());
1211   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1212   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1213   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1214   LIR_Opr tmp4 = reg;
1215   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1216 
1217   length.load_item_force(FrameMap::r19_opr);
1218   LIR_Opr len = length.result();
1219 
1220   ciKlass* obj = (ciKlass*) x->exact_type();
1221   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1222   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1223     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1224   }
1225 
1226   klass2reg_with_patching(klass_reg, obj, patching_info);
1227   if (x->is_null_free()) {
1228     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_INLINE_TYPE, klass_reg, slow_path);
1229   } else {
1230     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1231   }
1232 
1233   LIR_Opr result = rlock_result(x);
1234   __ move(reg, result);
1235 }
1236 
1237 
1238 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1239   Values* dims = x->dims();
1240   int i = dims->length();
1241   LIRItemList* items = new LIRItemList(i, i, NULL);
1242   while (i-- > 0) {
1243     LIRItem* size = new LIRItem(dims->at(i), this);
1244     items->at_put(i, size);
1245   }
1246 
1247   // Evaluate state_for early since it may emit code.
1248   CodeEmitInfo* patching_info = NULL;
1249   if (!x->klass()->is_loaded() || PatchALot) {
1250     patching_info = state_for(x, x->state_before());
1251 

1287 
1288 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1289   // nothing to do for now
1290 }
1291 
1292 void LIRGenerator::do_CheckCast(CheckCast* x) {
1293   LIRItem obj(x->obj(), this);
1294 
1295   CodeEmitInfo* patching_info = NULL;
1296   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1297     // must do this before locking the destination register as an oop register,
1298     // and before the obj is loaded (the latter is for deoptimization)
1299     patching_info = state_for(x, x->state_before());
1300   }
1301   obj.load_item();
1302 
1303   // info for exceptions
1304   CodeEmitInfo* info_for_exception =
1305       (x->needs_exception_state() ? state_for(x) :
1306                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1307   if (x->is_null_free()) {
1308     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1309   }
1310 
1311   CodeStub* stub;
1312   if (x->is_incompatible_class_change_check()) {
1313     assert(patching_info == NULL, "can't patch this");
1314     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1315   } else if (x->is_invokespecial_receiver_check()) {
1316     assert(patching_info == NULL, "can't patch this");
1317     stub = new DeoptimizeStub(info_for_exception,
1318                               Deoptimization::Reason_class_check,
1319                               Deoptimization::Action_none);
1320   } else {
1321     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1322   }
1323   LIR_Opr reg = rlock_result(x);
1324   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1325   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1326     tmp3 = new_register(objectType);
1327   }
1328 
1329 
1330   __ checkcast(reg, obj.result(), x->klass(),
1331                new_register(objectType), new_register(objectType), tmp3,
1332                x->direct_compare(), info_for_exception, patching_info, stub,
1333                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1334 
1335 }
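
Both checkcast above and instanceof below allocate a third temporary (tmp3) when the class is not yet loaded or UseCompressedClassPointers is enabled; with compressed class pointers the object's narrow klass field presumably has to be decoded (or the expected klass re-encoded) before the comparison, which needs scratch space. A simplified sketch of the decode step (an illustration only; base and shift are chosen at VM startup and the real logic lives in HotSpot's compressed-klass support):

    #include <cstdint>

    struct NarrowKlassDecoding {
      uint64_t base;   // start of the compressed class space
      unsigned shift;  // typically 0 or 3
      uint64_t decode(uint32_t narrow_klass) const {
        return base + ((uint64_t)narrow_klass << shift);
      }
    };
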
1336 
1337 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1338   LIRItem obj(x->obj(), this);
1339 
1340   // result and test object may not be in same register
1341   LIR_Opr reg = rlock_result(x);
1342   CodeEmitInfo* patching_info = NULL;
1343   if ((!x->klass()->is_loaded() || PatchALot)) {
1344     // must do this before locking the destination register as an oop register
1345     patching_info = state_for(x, x->state_before());
1346   }
1347   obj.load_item();
1348   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1349   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1350     tmp3 = new_register(objectType);
1351   }
1352   __ instanceof(reg, obj.result(), x->klass(),
1353                 new_register(objectType), new_register(objectType), tmp3,
1354                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1392     } else {
1393       yin->load_item();
1394     }
1395   } else {
1396     yin->load_item();
1397   }
1398 
1399   set_no_result(x);
1400 
1401   LIR_Opr left = xin->result();
1402   LIR_Opr right = yin->result();
1403 
1404   // add safepoint before generating condition code so it can be recomputed
1405   if (x->is_safepoint()) {
1406     // increment backedge counter if needed
1407     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1408         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1409     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1410   }
1411 
1412   if (x->substitutability_check()) {
1413     substitutability_check(x, *xin, *yin);
1414   } else {
1415     __ cmp(lir_cond(cond), left, right);
1416   }
1417 
1418   // Generate branch profiling. Profiling code doesn't kill flags.
1419   profile_branch(x, cond);
1420   move_to_phi(x->state());
1421   if (x->x()->type()->is_float_kind()) {
1422     __ branch(lir_cond(cond), x->tsux(), x->usux());
1423   } else {
1424     __ branch(lir_cond(cond), x->tsux());
1425   }
1426   assert(x->default_sux() == x->fsux(), "wrong destination above");
1427   __ jump(x->default_sux());
1428 }
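
The new code above routes acmp through substitutability_check(x, *xin, *yin) instead of a plain __ cmp() when inline types may be involved: two inline-type values are == when they are substitutable, i.e. of the same class with all fields comparing equal, rather than when they are the same reference. A hypothetical C++ illustration of that semantics (not HotSpot code):

    struct Point { int x; int y; };   // stand-in for an inline type

    // Substitutability: field-wise comparison instead of identity.
    static bool substitutable(const Point& a, const Point& b) {
      return a.x == b.x && a.y == b.y;
    }
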
1429 
1430 LIR_Opr LIRGenerator::getThreadPointer() {
1431    return FrameMap::as_pointer_opr(rthread);
1432 }
1433 
1434 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1435 
1436 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1437                                         CodeEmitInfo* info) {