src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp

 296   // this CodeEmitInfo must not have the xhandlers because here the
 297   // object is already locked (xhandlers expect object to be unlocked)
 298   CodeEmitInfo* info = state_for(x, x->state(), true);
 299   monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
 300                         x->monitor_no(), info_for_exception, info);
 301 }
 302 
 303 
 304 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 305   assert(x->is_pinned(),"");
 306 
 307   LIRItem obj(x->obj(), this);
 308   obj.dont_load_item();
 309 
 310   LIR_Opr lock = new_register(T_INT);
 311   LIR_Opr obj_temp = new_register(T_INT);
 312   set_no_result(x);
 313   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 314 }
 315 
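// Generate LIR for the continuation-yield intrinsic: build a Java calling convention
// with an empty signature, emit a runtime call to the cont_doYield stub, and move the
// stub's return value into the intrinsic's result operand.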
 316 void LIRGenerator::do_continuation_doYield(Intrinsic* x) {
 317   BasicTypeList signature(0);
 318   // signature.append(T_INT);
 319   CallingConvention* cc = frame_map()->java_calling_convention(&signature, true);
 320 
 321   const LIR_Opr result_reg = result_register_for(x->type());
 322   address entry = StubRoutines::cont_doYield();
 323   LIR_Opr result = rlock_result(x);
 324   CodeEmitInfo* info = state_for(x, x->state());
 325   __ call_runtime(entry, LIR_OprFact::illegalOpr, result_reg, cc->args(), info);
 326   __ move(result_reg, result);
 327 }
 328 
 329 // _ineg, _lneg, _fneg, _dneg
 330 void LIRGenerator::do_NegateOp(NegateOp* x) {
 331   LIRItem value(x->x(), this);
 332   value.set_destroys_register();
 333   value.load_item();
 334   LIR_Opr reg = rlock(x);
 335 
 336   LIR_Opr tmp = LIR_OprFact::illegalOpr;
 337 #ifdef _LP64
 338   if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
 339     if (x->type()->tag() == doubleTag) {
 340       tmp = new_register(T_DOUBLE);
 341       __ move(LIR_OprFact::doubleConst(-0.0), tmp);
 342     }
 343     else if (x->type()->tag() == floatTag) {
 344       tmp = new_register(T_FLOAT);
 345       __ move(LIR_OprFact::floatConst(-0.0), tmp);
 346     }
 347   }
 348 #endif
 349   __ negate(value.result(), reg, tmp);
 350 
 351   set_result(x, round_item(reg));
 352 }
 353 
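A side note on the tmp operand in do_NegateOp above: on 64-bit AVX-512 targets that lack the VL extension, the generator pre-loads the constant -0.0 (whose bit pattern is exactly the sign bit) into a temporary register, and the back end can then negate the float/double by XOR-ing the value with that mask. A minimal standalone sketch of the bit trick in plain C++ (illustrative only, not HotSpot code; the helper name is invented):

#include <cassert>
#include <cstdint>
#include <cstring>

// Negate a double by XOR-ing its bit pattern with that of -0.0 (the sign bit).
static double negate_via_sign_mask(double v) {
  uint64_t bits, mask;
  const double neg_zero = -0.0;
  std::memcpy(&bits, &v, sizeof(bits));
  std::memcpy(&mask, &neg_zero, sizeof(mask));  // mask == 0x8000000000000000
  bits ^= mask;                                 // flip only the sign bit
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  assert(negate_via_sign_mask(1.5) == -1.5);
  assert(negate_via_sign_mask(-2.0) == 2.0);
  return 0;
}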

 354 // for  _fadd, _fmul, _fsub, _fdiv, _frem
 355 //      _dadd, _dmul, _dsub, _ddiv, _drem
 356 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
 357   LIRItem left(x->x(),  this);
 358   LIRItem right(x->y(), this);
 359   LIRItem* left_arg  = &left;
 360   LIRItem* right_arg = &right;
 361   assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
 362   bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
 363   if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
 364     left.load_item();
 365   } else {
 366     left.dont_load_item();
 367   }
 368 
 369 #ifndef _LP64
 370   // do not load right operand if it is a constant.  only 0 and 1 are
 371   // loaded because there are special instructions for loading them
 372   // without memory access (not needed for SSE2 instructions)
 373   bool must_load_right = false;