src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArray.hpp"

  35 #include "ci/ciObjArrayKlass.hpp"
  36 #include "ci/ciTypeArrayKlass.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52 }
  53 
  54 

  85     case longTag:    opr = FrameMap::long0_opr;        break;
  86     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  87     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  88 
  89     case addressTag:
  90     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  91   }
  92 
  93   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  94   return opr;
  95 }
  96 
  97 
  98 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  99   LIR_Opr reg = new_register(T_INT);
 100   set_vreg_flag(reg, LIRGenerator::byte_reg);
 101   return reg;
 102 }
 103 
 104 






 105 //--------- loading items into registers --------------------------------
 106 
 107 
 108 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 109   if (v->type()->as_IntConstant() != nullptr) {
 110     return v->type()->as_IntConstant()->value() == 0L;
 111   } else if (v->type()->as_LongConstant() != nullptr) {
 112     return v->type()->as_LongConstant()->value() == 0L;
 113   } else if (v->type()->as_ObjectConstant() != nullptr) {
 114     return v->type()->as_ObjectConstant()->value()->is_null_object();
 115   } else {
 116     return false;
 117   }
 118 }
 119 
 120 bool LIRGenerator::can_inline_as_constant(Value v) const {
 121   // FIXME: Just a guess
 122   if (v->type()->as_IntConstant() != nullptr) {
 123     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 124   } else if (v->type()->as_LongConstant() != nullptr) {

 306 
 307 //----------------------------------------------------------------------
 308 //             visitor functions
 309 //----------------------------------------------------------------------
 310 
 311 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 312   assert(x->is_pinned(),"");
 313   LIRItem obj(x->obj(), this);
 314   obj.load_item();
 315 
 316   set_no_result(x);
 317 
 318   // "lock" stores the address of the monitor stack slot, so this is not an oop
 319   LIR_Opr lock = new_register(T_INT);
 320   LIR_Opr scratch = new_register(T_INT);
 321 
 322   CodeEmitInfo* info_for_exception = nullptr;
 323   if (x->needs_null_check()) {
 324     info_for_exception = state_for(x);
 325   }






 326   // this CodeEmitInfo must not have the xhandlers because here the
 327   // object is already locked (xhandlers expect object to be unlocked)
 328   CodeEmitInfo* info = state_for(x, x->state(), true);
 329   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 330                         x->monitor_no(), info_for_exception, info);
 331 }
 332 
 333 
 334 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 335   assert(x->is_pinned(),"");
 336 
 337   LIRItem obj(x->obj(), this);
 338   obj.dont_load_item();
 339 
 340   LIR_Opr lock = new_register(T_INT);
 341   LIR_Opr obj_temp = new_register(T_INT);
 342   LIR_Opr scratch = new_register(T_INT);
 343   set_no_result(x);
 344   monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
 345 }
 346 
 347 void LIRGenerator::do_NegateOp(NegateOp* x) {
 348 
 349   LIRItem from(x->x(), this);
 350   from.load_item();

1110   value.load_item();
1111   LIR_Opr input = value.result();
1112   LIR_Opr result = rlock(x);
1113 
1114   // arguments of lir_convert
1115   LIR_Opr conv_input = input;
1116   LIR_Opr conv_result = result;
1117 
1118   __ convert(x->op(), conv_input, conv_result);
1119 
1120   assert(result->is_virtual(), "result must be virtual register");
1121   set_result(x, result);
1122 }
1123 
1124 void LIRGenerator::do_NewInstance(NewInstance* x) {
1125 #ifndef PRODUCT
1126   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1127     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1128   }
1129 #endif
1130   CodeEmitInfo* info = state_for(x, x->state());
1131   LIR_Opr reg = result_register_for(x->type());
1132   new_instance(reg, x->klass(), x->is_unresolved(),
1133                        FrameMap::r10_oop_opr,
1134                        FrameMap::r11_oop_opr,
1135                        FrameMap::r4_oop_opr,
1136                        LIR_OprFact::illegalOpr,
1137                        FrameMap::r3_metadata_opr, info);

1138   LIR_Opr result = rlock_result(x);
1139   __ move(reg, result);
1140 }
1141 
1142 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1143   CodeEmitInfo* info = nullptr;
1144   if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1145     info = state_for(x, x->state_before());
1146     info->set_force_reexecute();
1147   } else {
1148     info = state_for(x, x->state());
1149   }
1150 
1151   LIRItem length(x->length(), this);
1152   length.load_item_force(FrameMap::r19_opr);
1153 
1154   LIR_Opr reg = result_register_for(x->type());
1155   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1156   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1157   LIR_Opr tmp3 = FrameMap::r5_oop_opr;

1173   LIRItem length(x->length(), this);
1174   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1175   // and therefore provide the state before the parameters have been consumed
1176   CodeEmitInfo* patching_info = nullptr;
1177   if (!x->klass()->is_loaded() || PatchALot) {
1178     patching_info =  state_for(x, x->state_before());
1179   }
1180 
1181   CodeEmitInfo* info = state_for(x, x->state());
1182 
1183   LIR_Opr reg = result_register_for(x->type());
1184   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1185   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1186   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1187   LIR_Opr tmp4 = reg;
1188   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1189 
1190   length.load_item_force(FrameMap::r19_opr);
1191   LIR_Opr len = length.result();
1192 
1193   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1194   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1195   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1196     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1197   }

1198   klass2reg_with_patching(klass_reg, obj, patching_info);
1199   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1200 
1201   LIR_Opr result = rlock_result(x);
1202   __ move(reg, result);
1203 }
1204 
1205 
1206 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1207   Values* dims = x->dims();
1208   int i = dims->length();
1209   LIRItemList* items = new LIRItemList(i, i, nullptr);
1210   while (i-- > 0) {
1211     LIRItem* size = new LIRItem(dims->at(i), this);
1212     items->at_put(i, size);
1213   }
1214 
1215   // Evaluate state_for early since it may emit code.
1216   CodeEmitInfo* patching_info = nullptr;
1217   if (!x->klass()->is_loaded() || PatchALot) {
1218     patching_info = state_for(x, x->state_before());
1219 

1255 
1256 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1257   // nothing to do for now
1258 }
1259 
1260 void LIRGenerator::do_CheckCast(CheckCast* x) {
1261   LIRItem obj(x->obj(), this);
1262 
1263   CodeEmitInfo* patching_info = nullptr;
1264   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1265     // must do this before locking the destination register as an oop register,
1266     // and before the obj is loaded (the latter is for deoptimization)
1267     patching_info = state_for(x, x->state_before());
1268   }
1269   obj.load_item();
1270 
1271   // info for exceptions
1272   CodeEmitInfo* info_for_exception =
1273       (x->needs_exception_state() ? state_for(x) :
1274                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));



1275 
1276   CodeStub* stub;
1277   if (x->is_incompatible_class_change_check()) {
1278     assert(patching_info == nullptr, "can't patch this");
1279     stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1280   } else if (x->is_invokespecial_receiver_check()) {
1281     assert(patching_info == nullptr, "can't patch this");
1282     stub = new DeoptimizeStub(info_for_exception,
1283                               Deoptimization::Reason_class_check,
1284                               Deoptimization::Action_none);
1285   } else {
1286     stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1287   }
1288   LIR_Opr reg = rlock_result(x);
1289   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1290   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1291     tmp3 = new_register(objectType);
1292   }


1293   __ checkcast(reg, obj.result(), x->klass(),
1294                new_register(objectType), new_register(objectType), tmp3,
1295                x->direct_compare(), info_for_exception, patching_info, stub,
1296                x->profiled_method(), x->profiled_bci());

1297 }
1298 
1299 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1300   LIRItem obj(x->obj(), this);
1301 
1302   // result and test object may not be in same register
1303   LIR_Opr reg = rlock_result(x);
1304   CodeEmitInfo* patching_info = nullptr;
1305   if ((!x->klass()->is_loaded() || PatchALot)) {
1306     // must do this before locking the destination register as an oop register
1307     patching_info = state_for(x, x->state_before());
1308   }
1309   obj.load_item();
1310   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1311   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1312     tmp3 = new_register(objectType);
1313   }
1314   __ instanceof(reg, obj.result(), x->klass(),
1315                 new_register(objectType), new_register(objectType), tmp3,
1316                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1359     } else {
1360       yin->load_item();
1361     }
1362   } else {
1363     yin->load_item();
1364   }
1365 
1366   set_no_result(x);
1367 
1368   LIR_Opr left = xin->result();
1369   LIR_Opr right = yin->result();
1370 
1371   // add safepoint before generating condition code so it can be recomputed
1372   if (x->is_safepoint()) {
1373     // increment backedge counter if needed
1374     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1375         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1376     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1377   }
1378 
1379   __ cmp(lir_cond(cond), left, right);





1380   // Generate branch profiling. Profiling code doesn't kill flags.
1381   profile_branch(x, cond);
1382   move_to_phi(x->state());
1383   if (x->x()->type()->is_float_kind()) {
1384     __ branch(lir_cond(cond), x->tsux(), x->usux());
1385   } else {
1386     __ branch(lir_cond(cond), x->tsux());
1387   }
1388   assert(x->default_sux() == x->fsux(), "wrong destination above");
1389   __ jump(x->default_sux());
1390 }
1391 
1392 LIR_Opr LIRGenerator::getThreadPointer() {
1393    return FrameMap::as_pointer_opr(rthread);
1394 }
1395 
1396 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1397 
1398 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1399                                         CodeEmitInfo* info) {

  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArray.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 #include "vmreg_aarch64.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 // Item will be loaded into a byte register; Intel only
  51 void LIRItem::load_byte_item() {
  52   load_item();
  53 }
  54 
  55 

  86     case longTag:    opr = FrameMap::long0_opr;        break;
  87     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  88     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  89 
  90     case addressTag:
  91     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  92   }
  93 
  94   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  95   return opr;
  96 }
  97 
  98 
  99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 100   LIR_Opr reg = new_register(T_INT);
 101   set_vreg_flag(reg, LIRGenerator::byte_reg);
 102   return reg;
 103 }
 104 
 105 
 106 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 107   tmp1 = new_register(T_INT);
 108   tmp2 = LIR_OprFact::illegalOpr;
 109 }
 110 
 111 
 112 //--------- loading items into registers --------------------------------
 113 
 114 
 115 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 116   if (v->type()->as_IntConstant() != nullptr) {
 117     return v->type()->as_IntConstant()->value() == 0L;
 118   } else if (v->type()->as_LongConstant() != nullptr) {
 119     return v->type()->as_LongConstant()->value() == 0L;
 120   } else if (v->type()->as_ObjectConstant() != nullptr) {
 121     return v->type()->as_ObjectConstant()->value()->is_null_object();
 122   } else {
 123     return false;
 124   }
 125 }
 126 
 127 bool LIRGenerator::can_inline_as_constant(Value v) const {
 128   // FIXME: Just a guess
 129   if (v->type()->as_IntConstant() != nullptr) {
 130     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 131   } else if (v->type()->as_LongConstant() != nullptr) {

 313 
 314 //----------------------------------------------------------------------
 315 //             visitor functions
 316 //----------------------------------------------------------------------
 317 
 318 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 319   assert(x->is_pinned(),"");
 320   LIRItem obj(x->obj(), this);
 321   obj.load_item();
 322 
 323   set_no_result(x);
 324 
 325   // "lock" stores the address of the monitor stack slot, so this is not an oop
 326   LIR_Opr lock = new_register(T_INT);
 327   LIR_Opr scratch = new_register(T_INT);
 328 
 329   CodeEmitInfo* info_for_exception = nullptr;
 330   if (x->needs_null_check()) {
 331     info_for_exception = state_for(x);
 332   }
 333 
 334   CodeStub* throw_ie_stub =
 335       x->maybe_inlinetype() ?
 336       new SimpleExceptionStub(C1StubId::throw_identity_exception_id, obj.result(), state_for(x)) :
 337       nullptr;
 338 
 339   // this CodeEmitInfo must not have the xhandlers because here the
 340   // object is already locked (xhandlers expect object to be unlocked)
 341   CodeEmitInfo* info = state_for(x, x->state(), true);
 342   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 343                 x->monitor_no(), info_for_exception, info, throw_ie_stub);
 344 }
 345 
 346 
 347 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 348   assert(x->is_pinned(),"");
 349 
 350   LIRItem obj(x->obj(), this);
 351   obj.dont_load_item();
 352 
 353   LIR_Opr lock = new_register(T_INT);
 354   LIR_Opr obj_temp = new_register(T_INT);
 355   LIR_Opr scratch = new_register(T_INT);
 356   set_no_result(x);
 357   monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
 358 }
 359 
 360 void LIRGenerator::do_NegateOp(NegateOp* x) {
 361 
 362   LIRItem from(x->x(), this);
 363   from.load_item();

1123   value.load_item();
1124   LIR_Opr input = value.result();
1125   LIR_Opr result = rlock(x);
1126 
1127   // arguments of lir_convert
1128   LIR_Opr conv_input = input;
1129   LIR_Opr conv_result = result;
1130 
1131   __ convert(x->op(), conv_input, conv_result);
1132 
1133   assert(result->is_virtual(), "result must be virtual register");
1134   set_result(x, result);
1135 }
1136 
1137 void LIRGenerator::do_NewInstance(NewInstance* x) {
1138 #ifndef PRODUCT
1139   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1140     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1141   }
1142 #endif
1143   CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1144   LIR_Opr reg = result_register_for(x->type());
1145   new_instance(reg, x->klass(), x->is_unresolved(),
1146                /* allow_inline */ false,
1147                FrameMap::r10_oop_opr,
1148                FrameMap::r11_oop_opr,
1149                FrameMap::r4_oop_opr,
1150                LIR_OprFact::illegalOpr,
1151                FrameMap::r3_metadata_opr, info);
1152   LIR_Opr result = rlock_result(x);
1153   __ move(reg, result);
1154 }
1155 
1156 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1157   CodeEmitInfo* info = nullptr;
1158   if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1159     info = state_for(x, x->state_before());
1160     info->set_force_reexecute();
1161   } else {
1162     info = state_for(x, x->state());
1163   }
1164 
1165   LIRItem length(x->length(), this);
1166   length.load_item_force(FrameMap::r19_opr);
1167 
1168   LIR_Opr reg = result_register_for(x->type());
1169   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1170   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1171   LIR_Opr tmp3 = FrameMap::r5_oop_opr;

1187   LIRItem length(x->length(), this);
1188   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1189   // and therefore provide the state before the parameters have been consumed
1190   CodeEmitInfo* patching_info = nullptr;
1191   if (!x->klass()->is_loaded() || PatchALot) {
1192     patching_info =  state_for(x, x->state_before());
1193   }
1194 
1195   CodeEmitInfo* info = state_for(x, x->state());
1196 
1197   LIR_Opr reg = result_register_for(x->type());
1198   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1199   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1200   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1201   LIR_Opr tmp4 = reg;
1202   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1203 
1204   length.load_item_force(FrameMap::r19_opr);
1205   LIR_Opr len = length.result();
1206 
1207   ciKlass* obj = (ciKlass*) x->exact_type();
1208   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1209   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1210     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1211   }
1212 
1213   klass2reg_with_patching(klass_reg, obj, patching_info);
1214   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
1215 
1216   LIR_Opr result = rlock_result(x);
1217   __ move(reg, result);
1218 }
1219 
1220 
1221 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1222   Values* dims = x->dims();
1223   int i = dims->length();
1224   LIRItemList* items = new LIRItemList(i, i, nullptr);
1225   while (i-- > 0) {
1226     LIRItem* size = new LIRItem(dims->at(i), this);
1227     items->at_put(i, size);
1228   }
1229 
1230   // Evaluate state_for early since it may emit code.
1231   CodeEmitInfo* patching_info = nullptr;
1232   if (!x->klass()->is_loaded() || PatchALot) {
1233     patching_info = state_for(x, x->state_before());
1234 

1270 
1271 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1272   // nothing to do for now
1273 }
1274 
1275 void LIRGenerator::do_CheckCast(CheckCast* x) {
1276   LIRItem obj(x->obj(), this);
1277 
1278   CodeEmitInfo* patching_info = nullptr;
1279   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1280     // must do this before locking the destination register as an oop register,
1281     // and before the obj is loaded (the latter is for deoptimization)
1282     patching_info = state_for(x, x->state_before());
1283   }
1284   obj.load_item();
1285 
1286   // info for exceptions
1287   CodeEmitInfo* info_for_exception =
1288       (x->needs_exception_state() ? state_for(x) :
1289                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1290   if (x->is_null_free()) {
1291     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1292   }
1293 
1294   CodeStub* stub;
1295   if (x->is_incompatible_class_change_check()) {
1296     assert(patching_info == nullptr, "can't patch this");
1297     stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1298   } else if (x->is_invokespecial_receiver_check()) {
1299     assert(patching_info == nullptr, "can't patch this");
1300     stub = new DeoptimizeStub(info_for_exception,
1301                               Deoptimization::Reason_class_check,
1302                               Deoptimization::Action_none);
1303   } else {
1304     stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1305   }
1306   LIR_Opr reg = rlock_result(x);
1307   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1308   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1309     tmp3 = new_register(objectType);
1310   }
1311 
1312 
1313   __ checkcast(reg, obj.result(), x->klass(),
1314                new_register(objectType), new_register(objectType), tmp3,
1315                x->direct_compare(), info_for_exception, patching_info, stub,
1316                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1317 
1318 }
1319 
1320 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1321   LIRItem obj(x->obj(), this);
1322 
1323   // result and test object may not be in same register
1324   LIR_Opr reg = rlock_result(x);
1325   CodeEmitInfo* patching_info = nullptr;
1326   if ((!x->klass()->is_loaded() || PatchALot)) {
1327     // must do this before locking the destination register as an oop register
1328     patching_info = state_for(x, x->state_before());
1329   }
1330   obj.load_item();
1331   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1332   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1333     tmp3 = new_register(objectType);
1334   }
1335   __ instanceof(reg, obj.result(), x->klass(),
1336                 new_register(objectType), new_register(objectType), tmp3,
1337                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1380     } else {
1381       yin->load_item();
1382     }
1383   } else {
1384     yin->load_item();
1385   }
1386 
1387   set_no_result(x);
1388 
1389   LIR_Opr left = xin->result();
1390   LIR_Opr right = yin->result();
1391 
1392   // add safepoint before generating condition code so it can be recomputed
1393   if (x->is_safepoint()) {
1394     // increment backedge counter if needed
1395     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1396         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1397     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1398   }
1399 
1400   if (x->substitutability_check()) {
1401     substitutability_check(x, *xin, *yin);
1402   } else {
1403     __ cmp(lir_cond(cond), left, right);
1404   }
1405 
1406   // Generate branch profiling. Profiling code doesn't kill flags.
1407   profile_branch(x, cond);
1408   move_to_phi(x->state());
1409   if (x->x()->type()->is_float_kind()) {
1410     __ branch(lir_cond(cond), x->tsux(), x->usux());
1411   } else {
1412     __ branch(lir_cond(cond), x->tsux());
1413   }
1414   assert(x->default_sux() == x->fsux(), "wrong destination above");
1415   __ jump(x->default_sux());
1416 }
1417 
1418 LIR_Opr LIRGenerator::getThreadPointer() {
1419    return FrameMap::as_pointer_opr(rthread);
1420 }
1421 
1422 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1423 
1424 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1425                                         CodeEmitInfo* info) {