/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
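// (Background note: without a REX prefix only eax/ebx/ecx/edx have byte
// subregisters on x86, so the register allocator must be steered toward one
// of them via the byte_reg flag set in rlock_byte() below.)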
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;          break;
    case objectTag:  opr = FrameMap::rax_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;        break;
    case floatTag:   opr = FrameMap::xmm0_float_opr;   break;
    case doubleTag:  opr = FrameMap::xmm0_double_opr;  break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == nullptr) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == nullptr;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  NOT_LP64( return new_register(T_ADDRESS); )
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}
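
// Illustrative example for the constant-index case above: with base = rbx,
// index = 5, shift = 3 and disp = 16, the displacement folds to
// (5 << 3) + 16 = 56, yielding the operand [rbx + 56]; on 64-bit the folded
// value is only used if it survives the round-trip through jint.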


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
#ifdef _LP64
    jint index = index_opr->as_jint();
    jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
    if (disp > max_jint) {
      // Displacement overflow. Cannot directly use instruction with 32-bit displacement for 64-bit addresses.
      // Convert array index to long to do array offset computation with 64-bit values.
      index_opr = new_register(T_LONG);
      __ move(LIR_OprFact::longConst(index), index_opr);
      addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
    } else {
      addr = new LIR_Address(array_opr, (intx)disp, type);
    }
#else
    // A displacement overflow can also occur for x86 but that is not a problem due to the 32-bit address range!
    // Let's assume an array 'a' and an access with displacement 'disp'. When disp overflows, then "a + disp" will
    // always be negative (i.e. underflows the 32-bit address range):
    // Let N = 2^32: a + signed_overflow(disp) = a + disp - N.
    // "a + disp" is always smaller than N. If an index was chosen which would point to an address beyond N, then
    // range checks would catch that and throw an exception. Thus, a + disp < 0 holds which means that it always
    // underflows the 32-bit address range:
    // unsigned_underflow(a + signed_overflow(disp)) = unsigned_underflow(a + disp - N)
    //                                               = (a + disp - N) + N = a + disp
    // This shows that we still end up at the correct address with a displacement overflow due to the 32-bit address
    // range limitation. This overflow only needs to be handled if addresses can be larger than 32 bits, as on
    // 64-bit platforms.
    addr = new LIR_Address(array_opr, offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
#endif // _LP64
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c + 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}
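
// Worked example for the reduction above: c = 7 has c + 1 == 8 == 2^3, so
//   tmp    = left
//   left   = left << 3       // left * 8
//   result = left - tmp      // left * 8 - left == left * 7
// while c = 9 (c - 1 == 8) uses the shift followed by an add instead.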


void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  LIR_Opr tmp = LockingMode == LM_LIGHTWEIGHT ? new_register(T_ADDRESS) : LIR_OprFact::illegalOpr;
  monitor_enter(obj.result(), lock, syncTempOpr(), tmp,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  __ negate(value.result(), reg);

  set_result(x, reg);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

#ifndef _LP64
  // do not load the right operand if it is a constant; only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != nullptr, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");

    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }
#endif // !LP64

  if (must_load_both) {
    // frem and drem also destroy the right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register()) {
    right.load_item();
#ifndef _LP64
  } else if (must_load_right) {
    right.load_item();
#endif // !LP64
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv) {
    tmp = new_register(T_DOUBLE);
  }

#ifdef _LP64
  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // frem and drem are implemented as a direct call into the runtime.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    BasicType bt = as_BasicType(x->type());
    BasicTypeList signature(2);
    signature.append(bt);
    signature.append(bt);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(0));
    right.load_item_force(cc->at(1));

    address entry = nullptr;
    switch (x->op()) {
      case Bytecodes::_frem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
        break;
      case Bytecodes::_drem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
        break;
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
    set_result(x, reg);
  }
#else
  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
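    // (x87 FPREM computes ST(0) = ST(0) rem ST(1), i.e. left rem right,
    // which is why left goes to fpu stack slot 0 and right to slot 1 below)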
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(), fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);

  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
  }
  set_result(x, reg);
#endif // _LP64
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));

    address entry = nullptr;
    switch (x->op()) {
      case Bytecodes::_lrem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
        break; // the divisor-is-zero check is done above
      case Bytecodes::_ldiv:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
        break; // the divisor-is-zero check is done above
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}


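// Note: x86 idiv raises a #DE fault not only for a zero divisor but also for
// the overflowing quotient min_jint / -1 (2^31 is unrepresentable), which is
// the corner case spelled out in the register table below.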
// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax: dividend                          min_int
    //         reg: divisor  (may not be rax or rdx)  -1
    //
    // output: rax: quotient  (= rax idiv reg)        min_int
    //         rdx: remainder (= rax irem reg)        0

    // rax and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x(), this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fixed
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = nullptr;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register (not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
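  // (the x86 variable-count shift instructions shl/sar/shr hardwire their
  // count operand to CL; only immediate counts can be encoded in the
  // instruction itself)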
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (is_reference_type(type)) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

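// atomic_xchg and atomic_add rely on the x86 read-modify-write forms
// "xchg [mem], reg" and "lock xadd [mem], reg", both of which return the old
// memory value in the register operand; copying the new value into result
// first produces the 2-operand shape those instructions expect.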
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10
#ifdef _LP64
      || x->id() == vmIntrinsics::_dtanh
#endif
      ) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  bool use_fpu = false;
#ifndef _LP64
  if (UseSSE < 2) {
    value.set_destroys_register();
  }
#endif // !LP64
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->id() == vmIntrinsics::_floatToFloat16) {
    tmp = new_register(T_FLOAT);
  }

  switch (x->id()) {
    case vmIntrinsics::_dabs:
      __ abs(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict:
      __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    case vmIntrinsics::_floatToFloat16:
      __ f2hf(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_float16ToFloat:
      __ hf2f(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    default:
      ShouldNotReachHere();
  }

  if (use_fpu) {
    __ move(calc_result, x->operand());
  }
}

void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

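  // Each case below prefers the CPU-specific generated stub when one exists
  // and falls back to the portable SharedRuntime implementation otherwise;
  // dtanh has no such fallback and is only reached when its stub exists.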
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtanh:
      assert(StubRoutines::dtanh() != nullptr, "tanh intrinsic not found");
      if (StubRoutines::dtanh() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtanh(), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }

  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  if (x->check_flag(Instruction::OmitChecksFlag)) {
    flags = 0;
  }

#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);

  if (expected_type != nullptr && flags == 0) {
    FrameMap* f = Compilation::current()->frame_map();
    f->update_reserved_argument_area_size(3 * BytesPerWord);
  }
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer
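  // (with the System V ABI used by HotSpot on Linux/macOS this "shift" is
  // j_rarg0..j_rarg5 = rsi, rdx, rcx, r8, r9, rdi versus
  // c_rarg0..c_rarg5 = rdi, rsi, rdx, rcx, r8, r9)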

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
#endif // LP64

  set_no_result(x);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

#ifndef _LP64
      if (!is_updateBytes) { // buf is a raw long address
        base_op = new_register(T_INT);
        __ convert(Bytecodes::_l2i, buf.result(), base_op);
      }
#else
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }
#endif

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "need AVX and CLMUL instructions support");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = nullptr;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        a = new LIR_Address(base_op, index, offset, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1);
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, arg3);

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

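// The vectorizedMismatch stub compares two regions whose element size is
// (1 << log2ArrayIndexScale) bytes; following jdk.internal.util.ArraysSupport
// it returns the index of the first mismatching element, or the bitwise
// complement of the number of tail elements it did not examine.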
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = a.result();

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = b.result();

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif


  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

void LIRGenerator::do_Convert(Convert* x) {
#ifdef _LP64
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  __ convert(x->op(), input, result);
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#else
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input = false, fixed_result = false, round_result = false, needs_stub = false;

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

    case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
    case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
    case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
    case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
    case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = nullptr;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

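  // (f2i and d2i need a stub because the truncating conversions -- SSE
  // cvttss2si/cvttsd2si or the x87 fist path -- write the "integer
  // indefinite" value 0x80000000 for NaN and out-of-range inputs; the stub
  // fixes the result up to Java semantics: NaN -> 0, saturation at min/max)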
  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
#endif // _LP64
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

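// do_NewMultiArray passes the dimension sizes through the outgoing argument
// area: each size is stored as a 32-bit int at rsp + i*4, and the runtime
// stub receives the rank plus rsp as a pointer to this "varargs" block.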
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = nullptr;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

// Intrinsic for Class::isInstance
address LIRGenerator::isInstance_entry() {
  return Runtime1::entry_for(C1StubId::is_instance_of_id);
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
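    // (e.g. "x > y" is rewritten as "y < x": the operands swap and the
    // condition is mirrored)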
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
#ifdef _LP64
  return FrameMap::as_pointer_opr(r15_thread);
#else
  LIR_Opr result = new_register(T_INT);
  __ get_thread(result);
  return result;
#endif // _LP64
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves.  This means
    // the value has to be moved between CPU and FPU registers.  In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
#ifndef _LP64
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
#endif // !LP64
  } else {
    __ load(address, result, info);
  }
}