1 /* 2 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

// "__" appends to the current generator's LIR list; the ASSERT build also
// records the C++ source file/line that emitted each LIR op.
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  // If the loaded value is not already in a virtual register flagged as
  // byte-addressable, copy it into a freshly allocated byte register.
  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


// Load the item into a register unless it is already a constant operand;
// constant operands are kept as-is so they can be used as immediates.
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

// Fixed registers required by specific x86 instructions and runtime entries
// (see do_ArithmeticOp_Int for the idiv/irem register requirements and
// do_ShiftOp for the shift-count requirement).
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }  // dividend in rax
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }  // quotient in rax
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }  // remainder in rdx
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }  // shift count in rcx
LIR_Opr LIRGenerator::syncLockOpr()     { return
new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


// Returns the fixed register in which a value of the given type is returned
// on x86 (rax / rdx:rax pair / xmm0). 'callee' is not used by this
// implementation.
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;          break;
    case objectTag:  opr = FrameMap::rax_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;        break;
    case floatTag:   opr = FrameMap::xmm0_float_opr;   break;
    case doubleTag:  opr = FrameMap::xmm0_double_opr;  break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


// Allocates a new virtual register and marks it as byte-addressable so the
// register allocator assigns one of the byte-capable registers.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  // We just need one 32-bit temp register for x86/x64, to check whether both
  // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
  // @temp = %r10d
  //     mov $0x405, %r10d
  //     and (%left), %r10d /* if need to check left */
  //     and (%right), %r10d /* if need to check right */
  //     cmp $0x405, $r10d
  //     jne L_oops_not_equal
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;  // second temp is not needed on x86
}

//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
// Returns true if v may be stored to memory as an immediate operand;
// T_SHORT/T_CHAR stores are excluded.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == nullptr) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


// A value can be used as an instruction immediate only if it is not a long
// (64-bit immediates are not generally encodable) and, for objects, only if
// it is the null constant.
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


// Same rule as above, expressed on a LIR constant.
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == nullptr;
}


// x86 polls a fixed address; no dedicated poll register is needed.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


// Builds a LIR_Address for base + (index << shift) + disp. A constant index
// is folded into the displacement when the result fits in 32 bits; otherwise
// the (long) constant is materialized into a temp register.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      // folded offset fits in a 32-bit displacement
      return new LIR_Address(base, (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


// Builds the address of array element index_opr in array_opr (header offset
// included). Handles constant indices whose folded displacement overflows
// 32 bits, and widens a 32-bit index register to 64 bits.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    jint index = index_opr->as_jint();
    jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
    if (disp > max_jint) {
      // Displacement overflow. Cannot directly use instruction with 32-bit displacement for 64-bit addresses.
      // Convert array index to long to do array offset computation with 64-bit values.
      index_opr = new_register(T_LONG);
      __ move(LIR_OprFact::longConst(index), index_opr);
      addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
    } else {
      addr = new LIR_Address(array_opr, (intx)disp, type);
    }
  } else {
    if (index_opr->type() == T_INT) {
      // widen a 32-bit index to 64 bits before address arithmetic
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


// Wraps an immediate of the given type (T_INT or T_LONG only) as a LIR
// constant operand.
LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
  } else {
    ShouldNotReachHere();
  }
  return r;
}

// Increments the counter at the given absolute address by 'step': the
// address is materialized into a pointer register first.
void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


// Memory-operand add: x86 can add an immediate directly to memory.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

// Compare an immediate against a memory operand (cmp mem, imm).
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


// Compare a register against a memory operand (cmp reg, mem).
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


// Replaces a multiply by constant c with shift/add or shift/sub when c is
// 2^n - 1 or 2^n + 1. Requires a valid tmp register; returns false when no
// reduction applies (caller emits a regular multiply instead).
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c +
1)) {
      // c == 2^k - 1:  left*c = (left << k) - left
      // tmp keeps the original value while left is shifted in place.
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      // c == 2^k + 1:  left*c = (left << k) + left
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


// Stores an outgoing argument at the given offset from rsp.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

// Emits the aastore type check; three object temps are required by
// LIR_Assembler's store_check implementation.
void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for inline types on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if ((LockingMode == LM_LIGHTWEIGHT) ||
      (EnableValhalla && x->maybe_inlinetype())) {
    scratch = new_register(T_ADDRESS);
  }

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // Locking on an inline type throws IdentityException; set up the stub
  // only when the receiver may be an inline type.
  CodeStub* throw_ie_stub = x->maybe_inlinetype() ?
    new SimpleExceptionStub(C1StubId::throw_identity_exception_id,
                            obj.result(), state_for(x))
    : nullptr;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_ie_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  // The object need not be loaded here; monitor_exit reloads it from the
  // monitor slot via obj_temp.
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  __ negate(value.result(), reg);

  set_result(x, reg);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  if (must_load_both) {
    // frem and drem destroy also right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register()) {
    right.load_item();
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  // dmul/ddiv need an extra temp in LIR_Assembler; other ops do not.
  if (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv) {
    tmp = new_register(T_DOUBLE);
  }

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // frem and drem are implemented as a direct call into the runtime.
    // Fresh LIRItems are used so the operands can be forced into the C
    // calling-convention argument locations.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    BasicType bt = as_BasicType(x->type());
    BasicTypeList signature(2);
    signature.append(bt);
    signature.append(bt);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(0));
    right.load_item_force(cc->at(1));

    address entry = nullptr;
    switch (x->op()) {
      case Bytecodes::_frem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
        break;
      case Bytecodes::_drem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
        break;
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
    set_result(x, reg);
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    // NOTE(review): the dividend (left) goes to argument slot 1 and the
    // divisor (right) to slot 0 — this matches the argument order of the
    // SharedRuntime::ldiv/lrem entries; confirm against sharedRuntime.hpp.
    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    // explicit divide-by-zero check before calling into the runtime
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));

    address entry = nullptr;
    switch (x->op()) {
      case Bytecodes::_lrem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
        break; // check if dividend is 0 is done elsewhere
      case Bytecodes::_ldiv:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
        break; // check if dividend is 0 is done elsewhere
      default:
        ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    // long multiply produces its result in the fixed long0 register pair
    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}



// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax,: dividend                         min_int
    //         reg: divisor   (may not be rax,/rdx)   -1
    //
    // output: rax,: quotient  (= rax, idiv reg)       min_int
    //         rdx: remainder (= rax, irem reg)       0

    // rax, and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    // dividend must be in rax (see divInOpr)
    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();   // quotient in rax
    } else {
      result_reg = remOutOpr();   // remainder in rdx
    }

    if (!ImplicitDiv0Checks) {
      // emit an explicit zero check when implicit (trap-based) checks are off
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = nullptr;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul ) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            // 2^n +/- 1: needs a temp, see strength_reduce_multiply
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


// Dispatches on the value tag to the type-specific arithmetic generators.
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    // constant int count can be encoded as an immediate
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then
// it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  // constants on the right can be used as immediates
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}



// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    // fcmpl/dcmpl treat unordered as -1, fcmpg/dcmpg as +1
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

// Emits a compare-and-swap; the compare value must live in rax (rdx:rax for
// long on 32-bit pairs via long0/long1) as required by cmpxchg.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (is_reference_type(type)) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
Unimplemented(); 687 } 688 LIR_Opr result = new_register(T_INT); 689 __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), 690 result, T_INT); 691 return result; 692 } 693 694 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) { 695 bool is_oop = is_reference_type(type); 696 LIR_Opr result = new_register(type); 697 value.load_item(); 698 // Because we want a 2-arg form of xchg and xadd 699 __ move(value.result(), result); 700 assert(type == T_INT || is_oop || type == T_LONG, "unexpected type"); 701 __ xchg(addr, result, result, LIR_OprFact::illegalOpr); 702 return result; 703 } 704 705 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) { 706 LIR_Opr result = new_register(type); 707 value.load_item(); 708 // Because we want a 2-arg form of xchg and xadd 709 __ move(value.result(), result); 710 assert(type == T_INT || type == T_LONG, "unexpected type"); 711 __ xadd(addr, result, result, LIR_OprFact::illegalOpr); 712 return result; 713 } 714 715 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) { 716 assert(x->number_of_arguments() == 3, "wrong type"); 717 assert(UseFMA, "Needs FMA instructions support."); 718 LIRItem value(x->argument_at(0), this); 719 LIRItem value1(x->argument_at(1), this); 720 LIRItem value2(x->argument_at(2), this); 721 722 value2.set_destroys_register(); 723 724 value.load_item(); 725 value1.load_item(); 726 value2.load_item(); 727 728 LIR_Opr calc_input = value.result(); 729 LIR_Opr calc_input1 = value1.result(); 730 LIR_Opr calc_input2 = value2.result(); 731 LIR_Opr calc_result = rlock_result(x); 732 733 switch (x->id()) { 734 case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break; 735 case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break; 736 default: ShouldNotReachHere(); 737 } 738 739 } 740 741 742 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { 743 assert(x->number_of_arguments() == 1 
|| (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10 || x->id() == vmIntrinsics::_dtanh
      ) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  // f2hf needs a float temp; the other operations need none
  if (x->id() == vmIntrinsics::_floatToFloat16) {
    tmp = new_register(T_FLOAT);
  }

  switch(x->id()) {
  case vmIntrinsics::_dabs:
    __ abs(calc_input, calc_result, tmp);
    break;
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dsqrt_strict:
    __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr);
    break;
  case vmIntrinsics::_floatToFloat16:
    __ f2hf(calc_input, calc_result, tmp);
    break;
  case vmIntrinsics::_float16ToFloat:
    __ hf2f(calc_input, calc_result, LIR_OprFact::illegalOpr);
    break;
  default:
    ShouldNotReachHere();
  }
}

// Transcendental math intrinsics are emitted as leaf calls into either the
// JVM's generated stubs (preferred, when available) or the SharedRuntime
// fallbacks. Arguments are forced into the C calling convention.
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
    // pow takes a second double argument
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc =
frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

  // For each function: prefer the generated stub, fall back to the
  // SharedRuntime C implementation when no stub was generated.
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtanh:
      // tanh has no SharedRuntime fallback; the intrinsic is only enabled
      // when the stub exists (hence the assert).
      assert(StubRoutines::dtanh() != nullptr, "tanh intrinsic not found");
      if (StubRoutines::dtanh() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtanh(), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default:  ShouldNotReachHere();
  }

  __ move(result_reg, calc_result);
}

// System.arraycopy intrinsic: loads the five arguments into the fixed java
// argument registers and emits the LIR arraycopy (which performs its own
// checks unless OmitChecksFlag is set).
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  if (x->check_flag(Instruction::OmitChecksFlag)) {
    flags = 0;
  }

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

// java.util.zip.CRC32 intrinsics: the single-byte update is emitted inline;
// the bulk updates compute the buffer address and call the
// updateBytesCRC32 stub as a leaf.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      // byte[] updates skip the array header; direct-buffer updates use the
      // raw address, so no header offset there
      int offset = is_updateBytes ?
arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 953 if(off.result()->is_constant()) { 954 index = LIR_OprFact::illegalOpr; 955 offset += off.result()->as_jint(); 956 } 957 LIR_Opr base_op = buf.result(); 958 959 if (index->is_valid()) { 960 LIR_Opr tmp = new_register(T_LONG); 961 __ convert(Bytecodes::_i2l, index, tmp); 962 index = tmp; 963 } 964 965 LIR_Address* a = new LIR_Address(base_op, 966 index, 967 offset, 968 T_BYTE); 969 BasicTypeList signature(3); 970 signature.append(T_INT); 971 signature.append(T_ADDRESS); 972 signature.append(T_INT); 973 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 974 const LIR_Opr result_reg = result_register_for(x->type()); 975 976 LIR_Opr addr = new_pointer_register(); 977 __ leal(LIR_OprFact::address(a), addr); 978 979 crc.load_item_force(cc->at(0)); 980 __ move(addr, cc->at(1)); 981 len.load_item_force(cc->at(2)); 982 983 __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); 984 __ move(result_reg, result); 985 986 break; 987 } 988 default: { 989 ShouldNotReachHere(); 990 } 991 } 992 } 993 994 void LIRGenerator::do_update_CRC32C(Intrinsic* x) { 995 assert(UseCRC32CIntrinsics, "need AVX and CLMUL instructions support"); 996 LIR_Opr result = rlock_result(x); 997 998 switch (x->id()) { 999 case vmIntrinsics::_updateBytesCRC32C: 1000 case vmIntrinsics::_updateDirectByteBufferCRC32C: { 1001 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C); 1002 1003 LIRItem crc(x->argument_at(0), this); 1004 LIRItem buf(x->argument_at(1), this); 1005 LIRItem off(x->argument_at(2), this); 1006 LIRItem end(x->argument_at(3), this); 1007 buf.load_item(); 1008 off.load_nonconstant(); 1009 end.load_nonconstant(); 1010 1011 // len = end - off 1012 LIR_Opr len = end.result(); 1013 LIR_Opr tmpA = new_register(T_INT); 1014 LIR_Opr tmpB = new_register(T_INT); 1015 __ move(end.result(), tmpA); 1016 __ move(off.result(), tmpB); 1017 __ sub(tmpA, tmpB, tmpA); 1018 len = tmpA; 
1019 1020 LIR_Opr index = off.result(); 1021 int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 1022 if (off.result()->is_constant()) { 1023 index = LIR_OprFact::illegalOpr; 1024 offset += off.result()->as_jint(); 1025 } 1026 LIR_Opr base_op = buf.result(); 1027 LIR_Address* a = nullptr; 1028 1029 if (index->is_valid()) { 1030 LIR_Opr tmp = new_register(T_LONG); 1031 __ convert(Bytecodes::_i2l, index, tmp); 1032 index = tmp; 1033 a = new LIR_Address(base_op, index, offset, T_BYTE); 1034 } else { 1035 a = new LIR_Address(base_op, offset, T_BYTE); 1036 } 1037 1038 BasicTypeList signature(3); 1039 signature.append(T_INT); 1040 signature.append(T_ADDRESS); 1041 signature.append(T_INT); 1042 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 1043 const LIR_Opr result_reg = result_register_for(x->type()); 1044 1045 LIR_Opr arg1 = cc->at(0), 1046 arg2 = cc->at(1), 1047 arg3 = cc->at(2); 1048 1049 crc.load_item_force(arg1); 1050 __ leal(LIR_OprFact::address(a), arg2); 1051 __ move(len, arg3); 1052 1053 __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args()); 1054 __ move(result_reg, result); 1055 break; 1056 } 1057 default: { 1058 ShouldNotReachHere(); 1059 } 1060 } 1061 } 1062 1063 void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) { 1064 assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support"); 1065 1066 // Make all state_for calls early since they can emit code 1067 LIR_Opr result = rlock_result(x); 1068 1069 LIRItem a(x->argument_at(0), this); // Object 1070 LIRItem aOffset(x->argument_at(1), this); // long 1071 LIRItem b(x->argument_at(2), this); // Object 1072 LIRItem bOffset(x->argument_at(3), this); // long 1073 LIRItem length(x->argument_at(4), this); // int 1074 LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int 1075 1076 a.load_item(); 1077 aOffset.load_nonconstant(); 1078 b.load_item(); 1079 bOffset.load_nonconstant(); 1080 1081 long 
constant_aOffset = 0; 1082 LIR_Opr result_aOffset = aOffset.result(); 1083 if (result_aOffset->is_constant()) { 1084 constant_aOffset = result_aOffset->as_jlong(); 1085 result_aOffset = LIR_OprFact::illegalOpr; 1086 } 1087 LIR_Opr result_a = a.result(); 1088 1089 long constant_bOffset = 0; 1090 LIR_Opr result_bOffset = bOffset.result(); 1091 if (result_bOffset->is_constant()) { 1092 constant_bOffset = result_bOffset->as_jlong(); 1093 result_bOffset = LIR_OprFact::illegalOpr; 1094 } 1095 LIR_Opr result_b = b.result(); 1096 1097 LIR_Address* addr_a = new LIR_Address(result_a, 1098 result_aOffset, 1099 constant_aOffset, 1100 T_BYTE); 1101 1102 LIR_Address* addr_b = new LIR_Address(result_b, 1103 result_bOffset, 1104 constant_bOffset, 1105 T_BYTE); 1106 1107 BasicTypeList signature(4); 1108 signature.append(T_ADDRESS); 1109 signature.append(T_ADDRESS); 1110 signature.append(T_INT); 1111 signature.append(T_INT); 1112 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 1113 const LIR_Opr result_reg = result_register_for(x->type()); 1114 1115 LIR_Opr ptr_addr_a = new_pointer_register(); 1116 __ leal(LIR_OprFact::address(addr_a), ptr_addr_a); 1117 1118 LIR_Opr ptr_addr_b = new_pointer_register(); 1119 __ leal(LIR_OprFact::address(addr_b), ptr_addr_b); 1120 1121 __ move(ptr_addr_a, cc->at(0)); 1122 __ move(ptr_addr_b, cc->at(1)); 1123 length.load_item_force(cc->at(2)); 1124 log2ArrayIndexScale.load_item_force(cc->at(3)); 1125 1126 __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args()); 1127 __ move(result_reg, result); 1128 } 1129 1130 void LIRGenerator::do_Convert(Convert* x) { 1131 LIRItem value(x->value(), this); 1132 value.load_item(); 1133 LIR_Opr input = value.result(); 1134 LIR_Opr result = rlock(x); 1135 __ convert(x->op(), input, result); 1136 assert(result->is_virtual(), "result must be virtual register"); 1137 set_result(x, result); 1138 } 1139 1140 1141 void LIRGenerator::do_NewInstance(NewInstance* 
x) { 1142 print_if_not_loaded(x); 1143 1144 CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state()); 1145 LIR_Opr reg = result_register_for(x->type()); 1146 new_instance(reg, x->klass(), x->is_unresolved(), 1147 !x->is_unresolved() && x->klass()->is_inlinetype(), 1148 FrameMap::rcx_oop_opr, 1149 FrameMap::rdi_oop_opr, 1150 FrameMap::rsi_oop_opr, 1151 LIR_OprFact::illegalOpr, 1152 FrameMap::rdx_metadata_opr, info); 1153 LIR_Opr result = rlock_result(x); 1154 __ move(reg, result); 1155 } 1156 1157 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { 1158 CodeEmitInfo* info = nullptr; 1159 if (x->state_before() != nullptr && x->state_before()->force_reexecute()) { 1160 info = state_for(x, x->state_before()); 1161 info->set_force_reexecute(); 1162 } else { 1163 info = state_for(x, x->state()); 1164 } 1165 1166 LIRItem length(x->length(), this); 1167 length.load_item_force(FrameMap::rbx_opr); 1168 1169 LIR_Opr reg = result_register_for(x->type()); 1170 LIR_Opr tmp1 = FrameMap::rcx_oop_opr; 1171 LIR_Opr tmp2 = FrameMap::rsi_oop_opr; 1172 LIR_Opr tmp3 = FrameMap::rdi_oop_opr; 1173 LIR_Opr tmp4 = reg; 1174 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr; 1175 LIR_Opr len = length.result(); 1176 BasicType elem_type = x->elt_type(); 1177 1178 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); 1179 1180 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); 1181 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array()); 1182 1183 LIR_Opr result = rlock_result(x); 1184 __ move(reg, result); 1185 } 1186 1187 1188 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { 1189 LIRItem length(x->length(), this); 1190 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction 1191 // and therefore provide the state before the parameters have been consumed 1192 CodeEmitInfo* patching_info = nullptr; 1193 if 
(!x->klass()->is_loaded() || PatchALot) { 1194 patching_info = state_for(x, x->state_before()); 1195 } 1196 1197 CodeEmitInfo* info = state_for(x, x->state()); 1198 1199 const LIR_Opr reg = result_register_for(x->type()); 1200 LIR_Opr tmp1 = FrameMap::rcx_oop_opr; 1201 LIR_Opr tmp2 = FrameMap::rsi_oop_opr; 1202 LIR_Opr tmp3 = FrameMap::rdi_oop_opr; 1203 LIR_Opr tmp4 = reg; 1204 LIR_Opr klass_reg = FrameMap::rdx_metadata_opr; 1205 1206 length.load_item_force(FrameMap::rbx_opr); 1207 LIR_Opr len = length.result(); 1208 1209 ciKlass* obj = (ciKlass*) x->exact_type(); 1210 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, x->is_null_free()); 1211 if (obj == ciEnv::unloaded_ciobjarrayklass()) { 1212 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); 1213 } 1214 klass2reg_with_patching(klass_reg, obj, patching_info); 1215 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, x->is_null_free()); 1216 1217 LIR_Opr result = rlock_result(x); 1218 __ move(reg, result); 1219 } 1220 1221 1222 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { 1223 Values* dims = x->dims(); 1224 int i = dims->length(); 1225 LIRItemList* items = new LIRItemList(i, i, nullptr); 1226 while (i-- > 0) { 1227 LIRItem* size = new LIRItem(dims->at(i), this); 1228 items->at_put(i, size); 1229 } 1230 1231 // Evaluate state_for early since it may emit code. 1232 CodeEmitInfo* patching_info = nullptr; 1233 if (!x->klass()->is_loaded() || PatchALot) { 1234 patching_info = state_for(x, x->state_before()); 1235 1236 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so 1237 // clone all handlers (NOTE: Usually this is handled transparently 1238 // by the CodeEmitInfo cloning logic in CodeStub constructors but 1239 // is done explicitly here because a stub isn't being used). 
1240 x->set_exception_handlers(new XHandlers(x->exception_handlers())); 1241 } 1242 CodeEmitInfo* info = state_for(x, x->state()); 1243 1244 i = dims->length(); 1245 while (i-- > 0) { 1246 LIRItem* size = items->at(i); 1247 size->load_nonconstant(); 1248 1249 store_stack_parameter(size->result(), in_ByteSize(i*4)); 1250 } 1251 1252 LIR_Opr klass_reg = FrameMap::rax_metadata_opr; 1253 klass2reg_with_patching(klass_reg, x->klass(), patching_info); 1254 1255 LIR_Opr rank = FrameMap::rbx_opr; 1256 __ move(LIR_OprFact::intConst(x->rank()), rank); 1257 LIR_Opr varargs = FrameMap::rcx_opr; 1258 __ move(FrameMap::rsp_opr, varargs); 1259 LIR_OprList* args = new LIR_OprList(3); 1260 args->append(klass_reg); 1261 args->append(rank); 1262 args->append(varargs); 1263 LIR_Opr reg = result_register_for(x->type()); 1264 __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), 1265 LIR_OprFact::illegalOpr, 1266 reg, args, info); 1267 1268 LIR_Opr result = rlock_result(x); 1269 __ move(reg, result); 1270 } 1271 1272 1273 void LIRGenerator::do_BlockBegin(BlockBegin* x) { 1274 // nothing to do for now 1275 } 1276 1277 1278 void LIRGenerator::do_CheckCast(CheckCast* x) { 1279 LIRItem obj(x->obj(), this); 1280 1281 CodeEmitInfo* patching_info = nullptr; 1282 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { 1283 // must do this before locking the destination register as an oop register, 1284 // and before the obj is loaded (the latter is for deoptimization) 1285 patching_info = state_for(x, x->state_before()); 1286 } 1287 obj.load_item(); 1288 1289 // info for exceptions 1290 CodeEmitInfo* info_for_exception = 1291 (x->needs_exception_state() ? 
state_for(x) : 1292 state_for(x, x->state_before(), true /*ignore_xhandler*/)); 1293 1294 if (x->is_null_free()) { 1295 __ null_check(obj.result(), new CodeEmitInfo(info_for_exception)); 1296 } 1297 1298 CodeStub* stub; 1299 if (x->is_incompatible_class_change_check()) { 1300 assert(patching_info == nullptr, "can't patch this"); 1301 stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); 1302 } else if (x->is_invokespecial_receiver_check()) { 1303 assert(patching_info == nullptr, "can't patch this"); 1304 stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); 1305 } else { 1306 stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); 1307 } 1308 LIR_Opr reg = rlock_result(x); 1309 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1310 if (!x->klass()->is_loaded() || UseCompressedClassPointers) { 1311 tmp3 = new_register(objectType); 1312 } 1313 __ checkcast(reg, obj.result(), x->klass(), 1314 new_register(objectType), new_register(objectType), tmp3, 1315 x->direct_compare(), info_for_exception, patching_info, stub, 1316 x->profiled_method(), x->profiled_bci(), x->is_null_free()); 1317 } 1318 1319 1320 void LIRGenerator::do_InstanceOf(InstanceOf* x) { 1321 LIRItem obj(x->obj(), this); 1322 1323 // result and test object may not be in same register 1324 LIR_Opr reg = rlock_result(x); 1325 CodeEmitInfo* patching_info = nullptr; 1326 if ((!x->klass()->is_loaded() || PatchALot)) { 1327 // must do this before locking the destination register as an oop register 1328 patching_info = state_for(x, x->state_before()); 1329 } 1330 obj.load_item(); 1331 LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 1332 if (!x->klass()->is_loaded() || UseCompressedClassPointers) { 1333 tmp3 = new_register(objectType); 1334 } 1335 __ instanceof(reg, obj.result(), x->klass(), 1336 new_register(objectType), new_register(objectType), 
tmp3, 1337 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); 1338 } 1339 1340 // Intrinsic for Class::isInstance 1341 address LIRGenerator::isInstance_entry() { 1342 return Runtime1::entry_for(C1StubId::is_instance_of_id); 1343 } 1344 1345 1346 void LIRGenerator::do_If(If* x) { 1347 assert(x->number_of_sux() == 2, "inconsistency"); 1348 ValueTag tag = x->x()->type()->tag(); 1349 bool is_safepoint = x->is_safepoint(); 1350 1351 If::Condition cond = x->cond(); 1352 1353 LIRItem xitem(x->x(), this); 1354 LIRItem yitem(x->y(), this); 1355 LIRItem* xin = &xitem; 1356 LIRItem* yin = &yitem; 1357 1358 if (tag == longTag) { 1359 // for longs, only conditions "eql", "neq", "lss", "geq" are valid; 1360 // mirror for other conditions 1361 if (cond == If::gtr || cond == If::leq) { 1362 cond = Instruction::mirror(cond); 1363 xin = &yitem; 1364 yin = &xitem; 1365 } 1366 xin->set_destroys_register(); 1367 } 1368 xin->load_item(); 1369 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { 1370 // inline long zero 1371 yin->dont_load_item(); 1372 } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) { 1373 // longs cannot handle constants at right side 1374 yin->load_item(); 1375 } else { 1376 yin->dont_load_item(); 1377 } 1378 1379 LIR_Opr left = xin->result(); 1380 LIR_Opr right = yin->result(); 1381 1382 set_no_result(x); 1383 1384 // add safepoint before generating condition code so it can be recomputed 1385 if (x->is_safepoint()) { 1386 // increment backedge counter if needed 1387 increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()), 1388 x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci()); 1389 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 1390 } 1391 1392 if (x->substitutability_check()) { 1393 substitutability_check(x, *xin, *yin); 1394 } else { 1395 __ 
cmp(lir_cond(cond), left, right); 1396 } 1397 // Generate branch profiling. Profiling code doesn't kill flags. 1398 profile_branch(x, cond); 1399 move_to_phi(x->state()); 1400 if (x->x()->type()->is_float_kind()) { 1401 __ branch(lir_cond(cond), x->tsux(), x->usux()); 1402 } else { 1403 __ branch(lir_cond(cond), x->tsux()); 1404 } 1405 assert(x->default_sux() == x->fsux(), "wrong destination above"); 1406 __ jump(x->default_sux()); 1407 } 1408 1409 1410 LIR_Opr LIRGenerator::getThreadPointer() { 1411 return FrameMap::as_pointer_opr(r15_thread); 1412 } 1413 1414 void LIRGenerator::trace_block_entry(BlockBegin* block) { 1415 store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0)); 1416 LIR_OprList* args = new LIR_OprList(); 1417 address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry); 1418 __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args); 1419 } 1420 1421 1422 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, 1423 CodeEmitInfo* info) { 1424 if (address->type() == T_LONG) { 1425 address = new LIR_Address(address->base(), 1426 address->index(), address->scale(), 1427 address->disp(), T_DOUBLE); 1428 // Transfer the value atomically by using FP moves. This means 1429 // the value has to be moved between CPU and FPU registers. It 1430 // always has to be moved through spill slot since there's no 1431 // quick way to pack the value into an SSE register. 
1432 LIR_Opr temp_double = new_register(T_DOUBLE); 1433 LIR_Opr spill = new_register(T_LONG); 1434 set_vreg_flag(spill, must_start_in_memory); 1435 __ move(value, spill); 1436 __ volatile_move(spill, temp_double, T_LONG); 1437 __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info); 1438 } else { 1439 __ store(value, address, info); 1440 } 1441 } 1442 1443 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, 1444 CodeEmitInfo* info) { 1445 if (address->type() == T_LONG) { 1446 address = new LIR_Address(address->base(), 1447 address->index(), address->scale(), 1448 address->disp(), T_DOUBLE); 1449 // Transfer the value atomically by using FP moves. This means 1450 // the value has to be moved between CPU and FPU registers. In 1451 // SSE0 and SSE1 mode it has to be moved through spill slot but in 1452 // SSE2+ mode it can be moved directly. 1453 LIR_Opr temp_double = new_register(T_DOUBLE); 1454 __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info); 1455 __ volatile_move(temp_double, result, T_LONG); 1456 } else { 1457 __ load(address, result, info); 1458 } 1459 }