
src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp


  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "compiler/compilerDefinitions.inline.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 #include "vmreg_aarch64.inline.hpp"
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 // Item will be loaded into a byte register; Intel only
  51 void LIRItem::load_byte_item() {
  52   load_item();
  53 }
  54 
  55 

  86     case longTag:    opr = FrameMap::long0_opr;        break;
  87     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  88     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  89 
  90     case addressTag:
  91     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  92   }
  93 
  94   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  95   return opr;
  96 }
  97 
  98 
  99 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 100   LIR_Opr reg = new_register(T_INT);
 101   set_vreg_flag(reg, LIRGenerator::byte_reg);
 102   return reg;
 103 }
 104 
 105 
 106 //--------- loading items into registers --------------------------------
 107 
 108 
 109 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 110   if (v->type()->as_IntConstant() != nullptr) {
 111     return v->type()->as_IntConstant()->value() == 0L;
 112   } else if (v->type()->as_LongConstant() != nullptr) {
 113     return v->type()->as_LongConstant()->value() == 0L;
 114   } else if (v->type()->as_ObjectConstant() != nullptr) {
 115     return v->type()->as_ObjectConstant()->value()->is_null_object();
 116   } else {
 117     return false;
 118   }
 119 }
 120 
 121 bool LIRGenerator::can_inline_as_constant(Value v) const {
 122   // FIXME: Just a guess
 123   if (v->type()->as_IntConstant() != nullptr) {
 124     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 125   } else if (v->type()->as_LongConstant() != nullptr) {

 305 
 306 //----------------------------------------------------------------------
 307 //             visitor functions
 308 //----------------------------------------------------------------------
 309 
 310 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 311   assert(x->is_pinned(),"");
 312   LIRItem obj(x->obj(), this);
 313   obj.load_item();
 314 
 315   set_no_result(x);
 316 
 317   // "lock" stores the address of the monitor stack slot, so this is not an oop
 318   LIR_Opr lock = new_register(T_INT);
 319   LIR_Opr scratch = new_register(T_INT);
 320 
 321   CodeEmitInfo* info_for_exception = nullptr;
 322   if (x->needs_null_check()) {
 323     info_for_exception = state_for(x);
 324   }
 325   // this CodeEmitInfo must not have the xhandlers because here the
 326   // object is already locked (xhandlers expect object to be unlocked)
 327   CodeEmitInfo* info = state_for(x, x->state(), true);
 328   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 329                         x->monitor_no(), info_for_exception, info);
 330 }
 331 
 332 
 333 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 334   assert(x->is_pinned(),"");
 335 
 336   LIRItem obj(x->obj(), this);
 337   obj.dont_load_item();
 338 
 339   LIR_Opr lock = new_register(T_INT);
 340   LIR_Opr obj_temp = new_register(T_INT);
 341   LIR_Opr scratch = new_register(T_INT);
 342   set_no_result(x);
 343   monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
 344 }
 345 
 346 void LIRGenerator::do_NegateOp(NegateOp* x) {
 347 
 348   LIRItem from(x->x(), this);
 349   from.load_item();

1111   value.load_item();
1112   LIR_Opr input = value.result();
1113   LIR_Opr result = rlock(x);
1114 
1115   // arguments of lir_convert
1116   LIR_Opr conv_input = input;
1117   LIR_Opr conv_result = result;
1118 
1119   __ convert(x->op(), conv_input, conv_result);
1120 
1121   assert(result->is_virtual(), "result must be virtual register");
1122   set_result(x, result);
1123 }
1124 
1125 void LIRGenerator::do_NewInstance(NewInstance* x) {
1126 #ifndef PRODUCT
1127   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1128     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1129   }
1130 #endif
1131   CodeEmitInfo* info = state_for(x, x->state());
1132   LIR_Opr reg = result_register_for(x->type());
1133   new_instance(reg, x->klass(), x->is_unresolved(),
1134                        FrameMap::r10_oop_opr,
1135                        FrameMap::r11_oop_opr,
1136                        FrameMap::r4_oop_opr,
1137                        LIR_OprFact::illegalOpr,
1138                        FrameMap::r3_metadata_opr, info);
1139   LIR_Opr result = rlock_result(x);
1140   __ move(reg, result);
1141 }
1142 
1143 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1144   CodeEmitInfo* info = nullptr;
1145   if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1146     info = state_for(x, x->state_before());
1147     info->set_force_reexecute();
1148   } else {
1149     info = state_for(x, x->state());
1150   }
1151 
1152   LIRItem length(x->length(), this);
1153   length.load_item_force(FrameMap::r19_opr);
1154 
1155   LIR_Opr reg = result_register_for(x->type());
1156   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1157   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1158   LIR_Opr tmp3 = FrameMap::r5_oop_opr;

1174   LIRItem length(x->length(), this);
1175   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1176   // and therefore provide the state before the parameters have been consumed
1177   CodeEmitInfo* patching_info = nullptr;
1178   if (!x->klass()->is_loaded() || PatchALot) {
1179     patching_info =  state_for(x, x->state_before());
1180   }
1181 
1182   CodeEmitInfo* info = state_for(x, x->state());
1183 
1184   LIR_Opr reg = result_register_for(x->type());
1185   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1186   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1187   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1188   LIR_Opr tmp4 = reg;
1189   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1190 
1191   length.load_item_force(FrameMap::r19_opr);
1192   LIR_Opr len = length.result();
1193 
1194   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1195   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1196   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1197     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1198   }
1199   klass2reg_with_patching(klass_reg, obj, patching_info);
1200   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1201 
1202   LIR_Opr result = rlock_result(x);
1203   __ move(reg, result);
1204 }
1205 
1206 
1207 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1208   Values* dims = x->dims();
1209   int i = dims->length();
1210   LIRItemList* items = new LIRItemList(i, i, nullptr);
1211   while (i-- > 0) {
1212     LIRItem* size = new LIRItem(dims->at(i), this);
1213     items->at_put(i, size);
1214   }
1215 
1216   // Evaluate state_for early since it may emit code.
1217   CodeEmitInfo* patching_info = nullptr;
1218   if (!x->klass()->is_loaded() || PatchALot) {
1219     patching_info = state_for(x, x->state_before());
1220 

1256 
1257 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1258   // nothing to do for now
1259 }
1260 
1261 void LIRGenerator::do_CheckCast(CheckCast* x) {
1262   LIRItem obj(x->obj(), this);
1263 
1264   CodeEmitInfo* patching_info = nullptr;
1265   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1266     // must do this before locking the destination register as an oop register,
1267     // and before the obj is loaded (the latter is for deoptimization)
1268     patching_info = state_for(x, x->state_before());
1269   }
1270   obj.load_item();
1271 
1272   // info for exceptions
1273   CodeEmitInfo* info_for_exception =
1274       (x->needs_exception_state() ? state_for(x) :
1275                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1276 
1277   CodeStub* stub;
1278   if (x->is_incompatible_class_change_check()) {
1279     assert(patching_info == nullptr, "can't patch this");
1280     stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1281   } else if (x->is_invokespecial_receiver_check()) {
1282     assert(patching_info == nullptr, "can't patch this");
1283     stub = new DeoptimizeStub(info_for_exception,
1284                               Deoptimization::Reason_class_check,
1285                               Deoptimization::Action_none);
1286   } else {
1287     stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1288   }
1289   LIR_Opr reg = rlock_result(x);
1290   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1291   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1292     tmp3 = new_register(objectType);
1293   }
1294   __ checkcast(reg, obj.result(), x->klass(),
1295                new_register(objectType), new_register(objectType), tmp3,
1296                x->direct_compare(), info_for_exception, patching_info, stub,
1297                x->profiled_method(), x->profiled_bci());
1298 }
1299 
1300 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1301   LIRItem obj(x->obj(), this);
1302 
1303   // result and test object may not be in same register
1304   LIR_Opr reg = rlock_result(x);
1305   CodeEmitInfo* patching_info = nullptr;
1306   if ((!x->klass()->is_loaded() || PatchALot)) {
1307     // must do this before locking the destination register as an oop register
1308     patching_info = state_for(x, x->state_before());
1309   }
1310   obj.load_item();
1311   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1312   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1313     tmp3 = new_register(objectType);
1314   }
1315   __ instanceof(reg, obj.result(), x->klass(),
1316                 new_register(objectType), new_register(objectType), tmp3,
1317                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1355     } else {
1356       yin->load_item();
1357     }
1358   } else {
1359     yin->load_item();
1360   }
1361 
1362   set_no_result(x);
1363 
1364   LIR_Opr left = xin->result();
1365   LIR_Opr right = yin->result();
1366 
1367   // add safepoint before generating condition code so it can be recomputed
1368   if (x->is_safepoint()) {
1369     // increment backedge counter if needed
1370     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1371         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1372     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1373   }
1374 
1375   __ cmp(lir_cond(cond), left, right);
1376   // Generate branch profiling. Profiling code doesn't kill flags.
1377   profile_branch(x, cond);
1378   move_to_phi(x->state());
1379   if (x->x()->type()->is_float_kind()) {
1380     __ branch(lir_cond(cond), x->tsux(), x->usux());
1381   } else {
1382     __ branch(lir_cond(cond), x->tsux());
1383   }
1384   assert(x->default_sux() == x->fsux(), "wrong destination above");
1385   __ jump(x->default_sux());
1386 }
1387 
1388 LIR_Opr LIRGenerator::getThreadPointer() {
1389    return FrameMap::as_pointer_opr(rthread);
1390 }
1391 
1392 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1393 
1394 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1395                                         CodeEmitInfo* info) {

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciInlineKlass.hpp"
  37 #include "ci/ciObjArrayKlass.hpp"
  38 #include "ci/ciTypeArrayKlass.hpp"
  39 #include "compiler/compilerDefinitions.inline.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "utilities/powerOfTwo.hpp"
  43 #include "vmreg_aarch64.inline.hpp"
  44 
  45 #ifdef ASSERT
  46 #define __ gen()->lir(__FILE__, __LINE__)->
  47 #else
  48 #define __ gen()->lir()->
  49 #endif
  50 
  51 // Item will be loaded into a byte register; Intel only
  52 void LIRItem::load_byte_item() {
  53   load_item();
  54 }
  55 
  56 

  87     case longTag:    opr = FrameMap::long0_opr;        break;
  88     case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
  89     case doubleTag:  opr = FrameMap::fpu0_double_opr;  break;
  90 
  91     case addressTag:
  92     default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  93   }
  94 
  95   assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  96   return opr;
  97 }
  98 
  99 
 100 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
 101   LIR_Opr reg = new_register(T_INT);
 102   set_vreg_flag(reg, LIRGenerator::byte_reg);
 103   return reg;
 104 }
 105 
 106 
 107 void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
 108   tmp1 = new_register(T_INT);
 109   tmp2 = LIR_OprFact::illegalOpr;
 110 }
 111 
 112 
 113 //--------- loading items into registers --------------------------------
 114 
 115 
 116 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
 117   if (v->type()->as_IntConstant() != nullptr) {
 118     return v->type()->as_IntConstant()->value() == 0L;
 119   } else if (v->type()->as_LongConstant() != nullptr) {
 120     return v->type()->as_LongConstant()->value() == 0L;
 121   } else if (v->type()->as_ObjectConstant() != nullptr) {
 122     return v->type()->as_ObjectConstant()->value()->is_null_object();
 123   } else {
 124     return false;
 125   }
 126 }
 127 
 128 bool LIRGenerator::can_inline_as_constant(Value v) const {
 129   // FIXME: Just a guess
 130   if (v->type()->as_IntConstant() != nullptr) {
 131     return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
 132   } else if (v->type()->as_LongConstant() != nullptr) {

 312 
 313 //----------------------------------------------------------------------
 314 //             visitor functions
 315 //----------------------------------------------------------------------
 316 
 317 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 318   assert(x->is_pinned(),"");
 319   LIRItem obj(x->obj(), this);
 320   obj.load_item();
 321 
 322   set_no_result(x);
 323 
 324   // "lock" stores the address of the monitor stack slot, so this is not an oop
 325   LIR_Opr lock = new_register(T_INT);
 326   LIR_Opr scratch = new_register(T_INT);
 327 
 328   CodeEmitInfo* info_for_exception = nullptr;
 329   if (x->needs_null_check()) {
 330     info_for_exception = state_for(x);
 331   }
 332 
 333   CodeStub* throw_ie_stub =
 334       x->maybe_inlinetype() ?
 335       new SimpleExceptionStub(C1StubId::throw_identity_exception_id, obj.result(), state_for(x)) :
 336       nullptr;
 337 
 338   // this CodeEmitInfo must not have the xhandlers because here the
 339   // object is already locked (xhandlers expect object to be unlocked)
 340   CodeEmitInfo* info = state_for(x, x->state(), true);
 341   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 342                 x->monitor_no(), info_for_exception, info, throw_ie_stub);
 343 }
 344 
 345 
 346 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 347   assert(x->is_pinned(),"");
 348 
 349   LIRItem obj(x->obj(), this);
 350   obj.dont_load_item();
 351 
 352   LIR_Opr lock = new_register(T_INT);
 353   LIR_Opr obj_temp = new_register(T_INT);
 354   LIR_Opr scratch = new_register(T_INT);
 355   set_no_result(x);
 356   monitor_exit(obj_temp, lock, syncTempOpr(), scratch, x->monitor_no());
 357 }
 358 
 359 void LIRGenerator::do_NegateOp(NegateOp* x) {
 360 
 361   LIRItem from(x->x(), this);
 362   from.load_item();

1124   value.load_item();
1125   LIR_Opr input = value.result();
1126   LIR_Opr result = rlock(x);
1127 
1128   // arguments of lir_convert
1129   LIR_Opr conv_input = input;
1130   LIR_Opr conv_result = result;
1131 
1132   __ convert(x->op(), conv_input, conv_result);
1133 
1134   assert(result->is_virtual(), "result must be virtual register");
1135   set_result(x, result);
1136 }
1137 
1138 void LIRGenerator::do_NewInstance(NewInstance* x) {
1139 #ifndef PRODUCT
1140   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1141     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1142   }
1143 #endif
1144   CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
1145   LIR_Opr reg = result_register_for(x->type());
1146   new_instance(reg, x->klass(), x->is_unresolved(),
1147                /* allow_inline */ false,
1148                FrameMap::r10_oop_opr,
1149                FrameMap::r11_oop_opr,
1150                FrameMap::r4_oop_opr,
1151                LIR_OprFact::illegalOpr,
1152                FrameMap::r3_metadata_opr, info);
1153   LIR_Opr result = rlock_result(x);
1154   __ move(reg, result);
1155 }
1156 
1157 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1158   CodeEmitInfo* info = nullptr;
1159   if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
1160     info = state_for(x, x->state_before());
1161     info->set_force_reexecute();
1162   } else {
1163     info = state_for(x, x->state());
1164   }
1165 
1166   LIRItem length(x->length(), this);
1167   length.load_item_force(FrameMap::r19_opr);
1168 
1169   LIR_Opr reg = result_register_for(x->type());
1170   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1171   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1172   LIR_Opr tmp3 = FrameMap::r5_oop_opr;

1188   LIRItem length(x->length(), this);
1189   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1190   // and therefore provide the state before the parameters have been consumed
1191   CodeEmitInfo* patching_info = nullptr;
1192   if (!x->klass()->is_loaded() || PatchALot) {
1193     patching_info =  state_for(x, x->state_before());
1194   }
1195 
1196   CodeEmitInfo* info = state_for(x, x->state());
1197 
1198   LIR_Opr reg = result_register_for(x->type());
1199   LIR_Opr tmp1 = FrameMap::r10_oop_opr;
1200   LIR_Opr tmp2 = FrameMap::r11_oop_opr;
1201   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1202   LIR_Opr tmp4 = reg;
1203   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1204 
1205   length.load_item_force(FrameMap::r19_opr);
1206   LIR_Opr len = length.result();
1207 
1208   ciKlass* obj = (ciKlass*) x->exact_type();
1209   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free());
1210   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1211     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1212   }
1213 
1214   klass2reg_with_patching(klass_reg, obj, patching_info);
1215   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free());
1216 
1217   LIR_Opr result = rlock_result(x);
1218   __ move(reg, result);
1219 }
1220 
1221 
1222 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1223   Values* dims = x->dims();
1224   int i = dims->length();
1225   LIRItemList* items = new LIRItemList(i, i, nullptr);
1226   while (i-- > 0) {
1227     LIRItem* size = new LIRItem(dims->at(i), this);
1228     items->at_put(i, size);
1229   }
1230 
1231   // Evaluate state_for early since it may emit code.
1232   CodeEmitInfo* patching_info = nullptr;
1233   if (!x->klass()->is_loaded() || PatchALot) {
1234     patching_info = state_for(x, x->state_before());
1235 

1271 
1272 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1273   // nothing to do for now
1274 }
1275 
1276 void LIRGenerator::do_CheckCast(CheckCast* x) {
1277   LIRItem obj(x->obj(), this);
1278 
1279   CodeEmitInfo* patching_info = nullptr;
1280   if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
1281     // must do this before locking the destination register as an oop register,
1282     // and before the obj is loaded (the latter is for deoptimization)
1283     patching_info = state_for(x, x->state_before());
1284   }
1285   obj.load_item();
1286 
1287   // info for exceptions
1288   CodeEmitInfo* info_for_exception =
1289       (x->needs_exception_state() ? state_for(x) :
1290                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1291   if (x->is_null_free()) {
1292     __ null_check(obj.result(), new CodeEmitInfo(info_for_exception));
1293   }
1294 
1295   CodeStub* stub;
1296   if (x->is_incompatible_class_change_check()) {
1297     assert(patching_info == nullptr, "can't patch this");
1298     stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1299   } else if (x->is_invokespecial_receiver_check()) {
1300     assert(patching_info == nullptr, "can't patch this");
1301     stub = new DeoptimizeStub(info_for_exception,
1302                               Deoptimization::Reason_class_check,
1303                               Deoptimization::Action_none);
1304   } else {
1305     stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
1306   }
1307   LIR_Opr reg = rlock_result(x);
1308   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1309   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1310     tmp3 = new_register(objectType);
1311   }
1312 
1313 
1314   __ checkcast(reg, obj.result(), x->klass(),
1315                new_register(objectType), new_register(objectType), tmp3,
1316                x->direct_compare(), info_for_exception, patching_info, stub,
1317                x->profiled_method(), x->profiled_bci(), x->is_null_free());
1318 
1319 }
1320 
1321 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1322   LIRItem obj(x->obj(), this);
1323 
1324   // result and test object may not be in same register
1325   LIR_Opr reg = rlock_result(x);
1326   CodeEmitInfo* patching_info = nullptr;
1327   if ((!x->klass()->is_loaded() || PatchALot)) {
1328     // must do this before locking the destination register as an oop register
1329     patching_info = state_for(x, x->state_before());
1330   }
1331   obj.load_item();
1332   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1333   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1334     tmp3 = new_register(objectType);
1335   }
1336   __ instanceof(reg, obj.result(), x->klass(),
1337                 new_register(objectType), new_register(objectType), tmp3,
1338                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

1376     } else {
1377       yin->load_item();
1378     }
1379   } else {
1380     yin->load_item();
1381   }
1382 
1383   set_no_result(x);
1384 
1385   LIR_Opr left = xin->result();
1386   LIR_Opr right = yin->result();
1387 
1388   // add safepoint before generating condition code so it can be recomputed
1389   if (x->is_safepoint()) {
1390     // increment backedge counter if needed
1391     increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
1392         x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
1393     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1394   }
1395 
1396   if (x->substitutability_check()) {
1397     substitutability_check(x, *xin, *yin);
1398   } else {
1399     __ cmp(lir_cond(cond), left, right);
1400   }
1401 
1402   // Generate branch profiling. Profiling code doesn't kill flags.
1403   profile_branch(x, cond);
1404   move_to_phi(x->state());
1405   if (x->x()->type()->is_float_kind()) {
1406     __ branch(lir_cond(cond), x->tsux(), x->usux());
1407   } else {
1408     __ branch(lir_cond(cond), x->tsux());
1409   }
1410   assert(x->default_sux() == x->fsux(), "wrong destination above");
1411   __ jump(x->default_sux());
1412 }
1413 
1414 LIR_Opr LIRGenerator::getThreadPointer() {
1415    return FrameMap::as_pointer_opr(rthread);
1416 }
1417 
1418 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1419 
1420 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1421                                         CodeEmitInfo* info) {