/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"

// In debug builds, record the source location of each emitted LIR op.
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);

    _result = reg;
  }
}


// Load the item unless it is already a constant operand; constants can
// usually be used in place on x86.
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

// Fixed registers dictated by the x86 ISA / JVM calling conventions.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return
new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


// Fixed register that holds a call result of the given value type
// (rax/rax:rdx for integral, xmm0 for floating point).
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::rax_opr;          break;
    case objectTag:  opr = FrameMap::rax_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;        break;
    case floatTag:   opr = FrameMap::xmm0_float_opr;   break;
    case doubleTag:  opr = FrameMap::xmm0_double_opr;  break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


// Allocate a virtual register flagged as byte-addressable (needed for
// instructions such as movb/setcc that require a byte register).
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


void LIRGenerator::init_temps_for_substitutability_check(LIR_Opr& tmp1, LIR_Opr& tmp2) {
  // We just need one 32-bit temp register for x86/x64, to check whether both
  // oops have markWord::always_locked_pattern. See LIR_Assembler::emit_opSubstitutabilityCheck().
  // @temp = %r10d
  // mov $0x405, %r10d
  // and (%left), %r10d   /* if need to check left */
  // and (%right), %r10d  /* if need to check right */
  // cmp $0x405, $r10d
  // jne L_oops_not_equal
  tmp1 = new_register(T_INT);
  tmp2 = LIR_OprFact::illegalOpr;
}

//--------- loading items into registers --------------------------------


// i486 instructions can inline constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (type == T_SHORT || type == T_CHAR) {
    // there is no immediate move of word values in assembler_x86.cpp
    return false;
  }
  Constant* c = v->as_Constant();
  if (c && c->state_before() == nullptr) {
    // constants of any type can be stored directly, except for
    // unloaded object constants.
    return true;
  }
  return false;
}


// Objects other than the null constant cannot be inlined as immediates.
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
         (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == nullptr;
}


// No dedicated safepoint-poll register on x86.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


// Build an addressing mode for base + (index << shift) + disp, folding a
// constant index into the displacement when it fits in 32 bits.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      // folded offset fits in a 32-bit displacement
      return new LIR_Address(base, (jint)c, type);
    } else {
      // too large for a displacement: materialize the index in a register
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}


// Address of array element array_opr[index_opr] for element type `type`.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    jint index = index_opr->as_jint();
    jlong disp = offset_in_bytes + (jlong)(index) * elem_size;
    if (disp > max_jint) {
      // Displacement overflow. Cannot directly use instruction with 32-bit displacement for 64-bit addresses.
      // Convert array index to long to do array offset computation with 64-bit values.
      index_opr = new_register(T_LONG);
      __ move(LIR_OprFact::longConst(index), index_opr);
      addr = new LIR_Address(array_opr, index_opr, LIR_Address::scale(type), offset_in_bytes, type);
    } else {
      addr = new LIR_Address(array_opr, (intx)disp, type);
    }
  } else {
    if (index_opr->type() == T_INT) {
      // widen a 32-bit index to 64 bits before address arithmetic
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}


// Wrap an immediate in a constant operand of the requested basic type.
LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
  } else {
    ShouldNotReachHere();
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}


// Replace a multiply by constant c with shift/add or shift/sub when c is
// adjacent to a power of two. Returns false when no reduction applies.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid() && c > 0 && c < max_jint) {
    if (is_power_of_2(c +
 1)) {
      // c == 2^k - 1:  left * c == (left << k) - left
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c + 1), left);
      __ sub(left, tmp, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      // c == 2^k + 1:  left * c == (left << k) + left
      __ move(left, tmp);
      __ shift_left(left, log2i_exact(c - 1), left);
      __ add(left, tmp, result);
      return true;
    }
  }
  return false;
}


void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for inline types on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if ((LockingMode == LM_LIGHTWEIGHT) ||
      (EnableValhalla && x->maybe_inlinetype())) {
    scratch = new_register(T_ADDRESS);
  }

  CodeEmitInfo* info_for_exception = nullptr;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // Locking a value object (inline type) throws IdentityException.
  CodeStub* throw_ie_stub = x->maybe_inlinetype() ?
      new SimpleExceptionStub(C1StubId::throw_identity_exception_id,
                              obj.result(), state_for(x))
      : nullptr;

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info, throw_ie_stub);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.set_destroys_register();
  value.load_item();
  LIR_Opr reg = rlock(x);

  __ negate(value.result(), reg);

  set_result(x, reg);
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  if (must_load_both) {
    // frem and drem destroy also right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register()) {
    right.load_item();
  } else {
    right.dont_load_item();
  }
  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv) {
    tmp = new_register(T_DOUBLE);
  }

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // frem and drem are implemented as a direct call into the runtime.
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    BasicType bt = as_BasicType(x->type());
    BasicTypeList signature(2);
    signature.append(bt);
    signature.append(bt);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(0));
    right.load_item_force(cc->at(1));

    address entry = nullptr;
    switch (x->op()) {
    case Bytecodes::_frem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), tmp);
    set_result(x, reg);
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    // explicit zero-divisor check; the runtime entry itself does not trap
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));

    address entry = nullptr;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check if dividend is 0 is done elsewhere
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), nullptr);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), nullptr);
  }
}



// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax,: dividend                         min_int
    //         reg: divisor   (may not be rax,/rdx)   -1
    //
    // output: rax,: quotient  (= rax, idiv reg)       min_int
    //         rdx: remainder (= rax, irem reg)       0

    // rax, and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
      // Idiv/irem cannot trap (passing info would generate an assertion).
      info = nullptr;
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul ) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        jint iconst = right_arg->get_jint_constant();
        if (iconst > 0 && iconst < max_jint) {
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  // count must always be in rcx
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  ValueTag elemType = x->type()->tag();
  bool must_load_count = !count.is_constant() || elemType == longTag;
  if (must_load_count) {
    // count for long must be in register
    count.load_item_force(shiftCountOpr());
  } else {
    count.dont_load_item();
  }
  value.load_item();
  LIR_Opr reg = rlock_result(x);

  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  // when an operand with use count 1 is the left operand, then
  // it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == nullptr && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}



// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

// Emit a compare-and-swap; cmpxchg requires the expected value in rax.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (is_reference_type(type)) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  // materialize the success flag (1 on success, 0 on failure)
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, T_INT);
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = is_reference_type(type);
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || is_oop || type == T_LONG, "unexpected type");
  __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  // Because we want a 2-arg form of xchg and xadd
  __ move(value.result(), result);
  assert(type == T_INT || type == T_LONG, "unexpected type");
  __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
  return result;
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value2.set_destroys_register();

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default: ShouldNotReachHere();
  }

}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1
 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");

  // transcendental functions are routed to the libm-style runtime stubs
  if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
      x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
      x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
      x->id() == vmIntrinsics::_dlog10 || x->id() == vmIntrinsics::_dtanh ||
      x->id() == vmIntrinsics::_dcbrt
      ) {
    do_LibmIntrinsic(x);
    return;
  }

  LIRItem value(x->argument_at(0), this);

  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->id() == vmIntrinsics::_floatToFloat16) {
    tmp = new_register(T_FLOAT);
  }

  switch(x->id()) {
    case vmIntrinsics::_dabs:
      __ abs(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_dsqrt:
    case vmIntrinsics::_dsqrt_strict:
      __ sqrt(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    case vmIntrinsics::_floatToFloat16:
      __ f2hf(calc_input, calc_result, tmp);
      break;
    case vmIntrinsics::_float16ToFloat:
      __ hf2f(calc_input, calc_result, LIR_OprFact::illegalOpr);
      break;
    default:
      ShouldNotReachHere();
  }
}

// Call the per-function stub when available, else the SharedRuntime fallback.
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
  LIRItem value(x->argument_at(0), this);
  value.set_destroys_register();

  LIR_Opr calc_result = rlock_result(x);
  LIR_Opr result_reg = result_register_for(x->type());

  CallingConvention* cc = nullptr;

  if (x->id() == vmIntrinsics::_dpow) {
    LIRItem value1(x->argument_at(1), this);

    value1.set_destroys_register();

    BasicTypeList signature(2);
    signature.append(T_DOUBLE);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
    value1.load_item_force(cc->at(1));
  } else {
    BasicTypeList signature(1);
    signature.append(T_DOUBLE);
    cc = frame_map()->c_calling_convention(&signature);
    value.load_item_force(cc->at(0));
  }

  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtanh:
      // no SharedRuntime fallback for tanh: the stub must exist
      assert(StubRoutines::dtanh() != nullptr, "tanh intrinsic not found");
      if (StubRoutines::dtanh() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dtanh(), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcbrt:
      // no SharedRuntime fallback for cbrt: the stub must exist
      assert(StubRoutines::dcbrt() != nullptr, "cbrt intrinsic not found");
      if (StubRoutines::dcbrt() != nullptr) {
        __ call_runtime_leaf(StubRoutines::dcbrt(), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }

  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  if (x->check_flag(Instruction::OmitChecksFlag)) {
    flags = 0;
  }

  // The java calling convention will give us enough registers
  // so that on the stub side
  // the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp =            FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      // byte[] carries a header; a raw ByteBuffer address does not
      int offset = is_updateBytes ?
arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 960 if(off.result()->is_constant()) { 961 index = LIR_OprFact::illegalOpr; 962 offset += off.result()->as_jint(); 963 } 964 LIR_Opr base_op = buf.result(); 965 966 if (index->is_valid()) { 967 LIR_Opr tmp = new_register(T_LONG); 968 __ convert(Bytecodes::_i2l, index, tmp); 969 index = tmp; 970 } 971 972 LIR_Address* a = new LIR_Address(base_op, 973 index, 974 offset, 975 T_BYTE); 976 BasicTypeList signature(3); 977 signature.append(T_INT); 978 signature.append(T_ADDRESS); 979 signature.append(T_INT); 980 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 981 const LIR_Opr result_reg = result_register_for(x->type()); 982 983 LIR_Opr addr = new_pointer_register(); 984 __ leal(LIR_OprFact::address(a), addr); 985 986 crc.load_item_force(cc->at(0)); 987 __ move(addr, cc->at(1)); 988 len.load_item_force(cc->at(2)); 989 990 __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); 991 __ move(result_reg, result); 992 993 break; 994 } 995 default: { 996 ShouldNotReachHere(); 997 } 998 } 999 } 1000 1001 void LIRGenerator::do_update_CRC32C(Intrinsic* x) { 1002 assert(UseCRC32CIntrinsics, "need AVX and CLMUL instructions support"); 1003 LIR_Opr result = rlock_result(x); 1004 1005 switch (x->id()) { 1006 case vmIntrinsics::_updateBytesCRC32C: 1007 case vmIntrinsics::_updateDirectByteBufferCRC32C: { 1008 bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C); 1009 1010 LIRItem crc(x->argument_at(0), this); 1011 LIRItem buf(x->argument_at(1), this); 1012 LIRItem off(x->argument_at(2), this); 1013 LIRItem end(x->argument_at(3), this); 1014 buf.load_item(); 1015 off.load_nonconstant(); 1016 end.load_nonconstant(); 1017 1018 // len = end - off 1019 LIR_Opr len = end.result(); 1020 LIR_Opr tmpA = new_register(T_INT); 1021 LIR_Opr tmpB = new_register(T_INT); 1022 __ move(end.result(), tmpA); 1023 __ move(off.result(), tmpB); 1024 __ sub(tmpA, tmpB, tmpA); 1025 len = 
tmpA;

      LIR_Opr index = off.result();
      // For byte[] the payload starts after the array header; a direct
      // ByteBuffer address already points at the data.
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      // Fold a constant offset into the displacement; no index register needed.
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = nullptr;

      if (index->is_valid()) {
        // Widen the 32-bit offset for 64-bit addressing.
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        a = new LIR_Address(base_op, index, offset, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      // C signature of the stub: (int crc, address buf, int len).
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1);
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, arg3);

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

// Emit LIR for the ArraysSupport.vectorizedMismatch intrinsic: forms the two
// element addresses (base + constant and/or register offset), then performs a
// leaf runtime call to the vectorizedMismatch stub with
// (address a, address b, int length, int log2scale).
void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  assert(UseVectorizedMismatchIntrinsic, "need AVX instruction support");

  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this); // Object
  LIRItem aOffset(x->argument_at(1), this); // long
  LIRItem b(x->argument_at(2), this); // Object
  LIRItem bOffset(x->argument_at(3), this); // int
  LIRItem length(x->argument_at(4), this); // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

  // Constant offsets become address displacements; otherwise keep the register index.
  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = a.result();

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = b.result();

  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
  signature.append(T_ADDRESS);
  signature.append(T_ADDRESS);
  signature.append(T_INT);
  signature.append(T_INT);
  CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  const LIR_Opr result_reg = result_register_for(x->type());

  LIR_Opr ptr_addr_a = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_a), ptr_addr_a);

  LIR_Opr ptr_addr_b = new_pointer_register();
  __ leal(LIR_OprFact::address(addr_b), ptr_addr_b);

  __ move(ptr_addr_a, cc->at(0));
  __ move(ptr_addr_b, cc->at(1));
  length.load_item_force(cc->at(2));
  log2ArrayIndexScale.load_item_force(cc->at(3));

  __ call_runtime_leaf(StubRoutines::vectorizedMismatch(), getThreadTemp(), result_reg, cc->args());
  __ move(result_reg, result);
}

// Emit a single LIR convert op for a primitive-conversion bytecode (i2l, d2f, ...).
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  __ convert(x->op(), input, result);
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

// Emit LIR for 'new' of a regular instance. Allocation uses fixed scratch
// registers (rcx/rdi/rsi, klass in rdx) — presumably to match the slow-path
// stub's expectations; TODO confirm against Runtime1.
void LIRGenerator::do_NewInstance(NewInstance*
x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->needs_state_before() ? x->state_before() : x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               // Valhalla: tell new_instance whether this is a known inline type.
               !x->is_unresolved() && x->klass()->is_inlinetype(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

// Emit LIR for 'newarray' (primitive-element array). Length is pinned in rbx
// and the element klass in rdx; the NewTypeArrayStub handles the slow path.
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = nullptr;
  if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
    info = state_for(x, x->state_before());
    info->set_force_reexecute();
  } else {
    info = state_for(x, x->state());
  }

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::rbx_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  // Primitive array klasses are always loaded; no patching needed.
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

// Emit LIR for 'anewarray' (object-element array), including the Valhalla
// flat/null-free array variants, which currently always take the slow path
// (see the TODO below).
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_metadata_opr;

  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  ciKlass* obj = ciArrayKlass::make(x->klass(), false, true, true);

  // TODO 8265122 Implement a fast path for this
  bool is_flat = obj->is_loaded() && obj->is_flat_array_klass();
  bool is_null_free = obj->is_loaded() && obj->as_array_klass()->is_elem_null_free();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, is_null_free);
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path, true, is_null_free || is_flat);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

// Emit LIR for 'multianewarray'. Dimension sizes are spilled to the stack
// (4 bytes each, see store_stack_parameter below) and rcx is pointed at them
// as a varargs block for the new_multi_array runtime stub.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, nullptr);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::rax_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  // Pass the current stack pointer so the stub can read the dimension array.
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

// Emit LIR for 'checkcast'. Chooses the exception/deopt stub variant based on
// the kind of check, then emits a single checkcast LIR op with two (or three)
// temp registers for the type test.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = nullptr;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  // Pick the slow-path stub: ICCE for incompatible-class-change checks,
  // a deopt for invokespecial receiver checks, else ClassCastException.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == nullptr, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is needed when the klass may be patched or class pointers
  // are compressed.
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci(), x->is_null_free());
}

// Emit LIR for 'instanceof'. Same temp-register scheme as do_CheckCast, but
// the result is a boolean value rather than an exception path.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = nullptr;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

// Intrinsic for Class::isInstance
address LIRGenerator::isInstance_entry() {
  return Runtime1::entry_for(C1StubId::is_instance_of_id);
}

// Emit LIR for a two-way branch: compares x and y (or performs a Valhalla
// substitutability check), emits a safepoint on loop back-edges, profiles the
// branch, and branches to tsux/fsux (plus usux for unordered float results).
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();  // NOTE(review): cached but unused; checks below call x->is_safepoint() directly.

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag || x->substitutability_check()) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  if (x->substitutability_check()) {
    substitutability_check(x, *xin, *yin);
  } else {
    __ cmp(lir_cond(cond), left, right);
  }
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // Float compares need an extra target for the unordered (NaN) outcome.
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

// x86_64: the current JavaThread lives permanently in r15.
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(r15_thread);
}

// Debug aid: emit a leaf call to Runtime1::trace_block_entry with the block id
// passed as the first (stack) parameter.
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}

// Store to a volatile field. 64-bit (T_LONG) stores are routed through an FP
// register so the write is a single atomic 8-byte move; everything else is a
// plain store.
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // Retype the address so the move below is an 8-byte FP access.
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. It
    // always has to be moved through spill slot since there's no
    // quick way to pack the value into an SSE register.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(value, spill);
    __ volatile_move(spill, temp_double, T_LONG);
    __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
  } else {
    __ store(value, address, info);
  }
}

// Load from a volatile field. Mirror of volatile_field_store: T_LONG loads go
// through an FP register for atomicity; everything else is a plain load.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // Retype the address so the move below is an 8-byte FP access.
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
  } else {
    __ load(address, result, info);
  }
}