/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // of a 128-bit operand for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));


NEEDS_CLEANUP // remove these definitions?
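// Illustrative sketch of how the sign-mask pools above are consumed by the XMM
// abs/neg paths emitted later in this file (not the exact emission sites):
//   AbsF: AND the value with float_signmask_pool  to clear the sign bit,
//         e.g. __ andps(dst, ExternalAddress((address)float_signmask_pool));
//   NegD: XOR the value with double_signflip_pool to flip the sign bit,
//         e.g. __ xorpd(dst, ExternalAddress((address)double_signflip_pool));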
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

#ifndef _LP64
void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}
#endif // !_LP64

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
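    // Worked example (a sketch, assuming 64-bit words, max_locals() == 3 and
    // number_of_locks == 2): monitor_offset = 8*3 + 16*(2-1) = 40, so the loop
    // below copies lock 0 from [OSR_buf+40]/[OSR_buf+48] and lock 1 from
    // [OSR_buf+24]/[OSR_buf+32] into the frame's monitor slots.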
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;
  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
  assert(thread != noreg, "x86_32 not implemented");

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32-bit words)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    if (LockingMode == LM_MONITOR) {
      __ jmp(*stub->entry());
    } else {
      __ unlock_object(rdi, rsi, rax, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr(), rscratch1);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }
  if (InlineTypeReturnedAsFields) {
#ifndef _LP64
    Unimplemented();
#endif
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      __ test_oop_is_not_inline_type(rax, rscratch1, skip);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rdi, rax, rscratch1);
      __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
      // Unpack handler can be null if inline type is not scalarizable in returns
      __ testptr(rdi, rdi);
      __ jcc(Assembler::zero, skip);
      __ call(rdi);

      __ bind(skip);
    }
    // At this point, rax points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = rbx;
  __ get_thread(thread);
#endif
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}


int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  return (__ store_inline_type_fields_to_buf(vk, false));
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
#ifdef _LP64
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
#else
  assert(tmp->is_cpu_register(), "needed");
  const Register poll_addr = tmp->as_register();
  __ get_thread(poll_addr);
  __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
#endif
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject(), noreg);
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32-bit so this doesn't produce a useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

#ifndef _LP64
    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
#endif // !_LP64

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

#ifndef _LP64
    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

#ifndef _LP64
  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)  __ fstp_s (dst_addr);
    else                __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)  __ fstp_d (dst_addr);
    else                __ fst_d  (dst_addr);
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
#ifdef _LP64
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
#else
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_s(as_Address(to_addr));
        else                __ fst_s (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_DOUBLE: {
#ifdef _LP64
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
#else
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_d(as_Address(to_addr));
        else                __ fst_d (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);
  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

#ifndef _LP64
  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64 bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == nullptr && patch == nullptr, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif

    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


NEEDS_CLEANUP; // This could be static?
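// Note (illustrative): this maps an element's byte size to the x86 SIB scale
// used for indexed addressing, e.g. a T_INT element (4 bytes) yields
// Address::times_4 so that base + index*4 + disp addresses element 'index'.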
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


#ifdef _LP64
    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;
#else
    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");
      __ movptr(Address(rsp, 0), src->as_register_lo());
      __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
      }
      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != nullptr, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::fpu2long_stub_id)));
      }
      break;
#endif // _LP64

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    // init_state needs acquire, but x86 is TSO, and so we are already good.
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath || op->is_null_free() ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->need_null_check()) {
    __ testptr(obj, obj);
    if (op->should_profile()) {
      Label not_null;
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(*obj_is_null);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, obj, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv, &update_done);

      Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);

      __ bind(update_done);
    } else {
      __ jcc(Assembler::equal, *obj_is_null);
    }
1763 } 1764 1765 if (!k->is_loaded()) { 1766 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1767 } else { 1768 #ifdef _LP64 1769 __ mov_metadata(k_RInfo, k->constant_encoding()); 1770 #endif // _LP64 1771 } 1772 __ verify_oop(obj); 1773 1774 if (op->fast_check()) { 1775 // get object class 1776 // not a safepoint as obj null check happens earlier 1777 #ifdef _LP64 1778 if (UseCompressedClassPointers) { 1779 __ load_klass(Rtmp1, obj, tmp_load_klass); 1780 __ cmpptr(k_RInfo, Rtmp1); 1781 } else { 1782 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1783 } 1784 #else 1785 if (k->is_loaded()) { 1786 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1787 } else { 1788 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1789 } 1790 #endif 1791 __ jcc(Assembler::notEqual, *failure_target); 1792 // successful cast, fall through to profile or jump 1793 } else { 1794 // get object class 1795 // not a safepoint as obj null check happens earlier 1796 __ load_klass(klass_RInfo, obj, tmp_load_klass); 1797 if (k->is_loaded()) { 1798 // See if we get an immediate positive hit 1799 #ifdef _LP64 1800 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1801 #else 1802 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1803 #endif // _LP64 1804 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1805 __ jcc(Assembler::notEqual, *failure_target); 1806 // successful cast, fall through to profile or jump 1807 } else { 1808 // See if we get an immediate positive hit 1809 __ jcc(Assembler::equal, *success_target); 1810 // check for self 1811 #ifdef _LP64 1812 __ cmpptr(klass_RInfo, k_RInfo); 1813 #else 1814 __ cmpklass(klass_RInfo, k->constant_encoding()); 1815 #endif // _LP64 1816 __ jcc(Assembler::equal, *success_target); 1817 1818 __ push(klass_RInfo); 1819 #ifdef _LP64 1820 __ push(k_RInfo); 1821 #else 1822 __ pushklass(k->constant_encoding(), noreg); 1823 #endif // _LP64 1824 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1825 __ pop(klass_RInfo); 1826 __ pop(klass_RInfo); 1827 // result is a boolean 1828 __ testl(klass_RInfo, klass_RInfo); 1829 __ jcc(Assembler::equal, *failure_target); 1830 // successful cast, fall through to profile or jump 1831 } 1832 } else { 1833 // perform the fast part of the checking logic 1834 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1835 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1836 __ push(klass_RInfo); 1837 __ push(k_RInfo); 1838 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1839 __ pop(klass_RInfo); 1840 __ pop(k_RInfo); 1841 // result is a boolean 1842 __ testl(k_RInfo, k_RInfo); 1843 __ jcc(Assembler::equal, *failure_target); 1844 // successful cast, fall through to profile or jump 1845 } 1846 } 1847 __ jmp(*success); 1848 } 1849 1850 1851 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1852 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1853 LIR_Code code = op->code(); 1854 if (code == lir_store_check) { 1855 Register value = op->object()->as_register(); 1856 Register array = op->array()->as_register(); 1857 Register k_RInfo = op->tmp1()->as_register(); 1858 Register klass_RInfo = op->tmp2()->as_register(); 1859 Register Rtmp1 = op->tmp3()->as_register(); 1860 1861 CodeStub* stub = op->stub(); 1862 1863 // check if it needs to be 
profiled 1864 ciMethodData* md = nullptr; 1865 ciProfileData* data = nullptr; 1866 1867 if (op->should_profile()) { 1868 ciMethod* method = op->profiled_method(); 1869 assert(method != nullptr, "Should have method"); 1870 int bci = op->profiled_bci(); 1871 md = method->method_data_or_null(); 1872 assert(md != nullptr, "Sanity"); 1873 data = md->bci_to_data(bci); 1874 assert(data != nullptr, "need data for type check"); 1875 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1876 } 1877 Label done; 1878 Label* success_target = &done; 1879 Label* failure_target = stub->entry(); 1880 1881 __ testptr(value, value); 1882 if (op->should_profile()) { 1883 Label not_null; 1884 Register mdo = klass_RInfo; 1885 __ mov_metadata(mdo, md->constant_encoding()); 1886 __ jccb(Assembler::notEqual, not_null); 1887 // Object is null; update MDO and exit 1888 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1889 int header_bits = BitData::null_seen_byte_constant(); 1890 __ orb(data_addr, header_bits); 1891 __ jmp(done); 1892 __ bind(not_null); 1893 1894 Label update_done; 1895 Register recv = k_RInfo; 1896 __ load_klass(recv, value, tmp_load_klass); 1897 type_profile_helper(mdo, md, data, recv, &update_done); 1898 1899 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1900 __ addptr(counter_addr, DataLayout::counter_increment); 1901 __ bind(update_done); 1902 } else { 1903 __ jcc(Assembler::equal, done); 1904 } 1905 1906 add_debug_info_for_null_check_here(op->info_for_exception()); 1907 __ load_klass(k_RInfo, array, tmp_load_klass); 1908 __ load_klass(klass_RInfo, value, tmp_load_klass); 1909 1910 // get instance klass (it's already uncompressed) 1911 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1912 // perform the fast part of the checking logic 1913 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1914 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1915 __ push(klass_RInfo); 1916 __ push(k_RInfo); 1917 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1918 __ pop(klass_RInfo); 1919 __ pop(k_RInfo); 1920 // result is a boolean 1921 __ testl(k_RInfo, k_RInfo); 1922 __ jcc(Assembler::equal, *failure_target); 1923 // fall through to the success case 1924 1925 __ bind(done); 1926 } else 1927 if (code == lir_checkcast) { 1928 Register obj = op->object()->as_register(); 1929 Register dst = op->result_opr()->as_register(); 1930 Label success; 1931 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1932 __ bind(success); 1933 if (dst != obj) { 1934 __ mov(dst, obj); 1935 } 1936 } else 1937 if (code == lir_instanceof) { 1938 Register obj = op->object()->as_register(); 1939 Register dst = op->result_opr()->as_register(); 1940 Label success, failure, done; 1941 emit_typecheck_helper(op, &success, &failure, &failure); 1942 __ bind(failure); 1943 __ xorptr(dst, dst); 1944 __ jmpb(done); 1945 __ bind(success); 1946 __ movptr(dst, 1); 1947 __ bind(done); 1948 } else { 1949 ShouldNotReachHere(); 1950 } 1951 1952 } 1953 1954 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) { 1955 // We are loading/storing from/to an array that *may* be a flat array (the 1956 // declared type is Object[], abstract[], interface[] or VT.ref[]). 1957 // If this array is a flat array, take the slow path. 
1958 __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1959 if (!op->value()->is_illegal()) {
1960 // The array is not a flat array, but it might be null-free. If we are storing
1961 // a null into a null-free array, take the slow path (which will throw NPE).
1962 Label skip;
1963 __ cmpptr(op->value()->as_register(), NULL_WORD);
1964 __ jcc(Assembler::notEqual, skip);
1965 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1966 __ bind(skip);
1967 }
1968 }
1969
1970 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1971 // We are storing into an array that *may* be null-free (the declared type is
1972 // Object[], abstract[], interface[] or VT.ref[]).
1973 Label test_mark_word;
1974 Register tmp = op->tmp()->as_register();
1975 __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1976 __ testl(tmp, markWord::unlocked_value);
1977 __ jccb(Assembler::notZero, test_mark_word);
1978 __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
1979 __ bind(test_mark_word);
1980 __ testl(tmp, markWord::null_free_array_bit_in_place);
1981 }
1982
1983 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1984 Label L_oops_equal;
1985 Label L_oops_not_equal;
1986 Label L_end;
1987
1988 Register left = op->left()->as_register();
1989 Register right = op->right()->as_register();
1990
1991 __ cmpptr(left, right);
1992 __ jcc(Assembler::equal, L_oops_equal);
1993
1994 // (1) Null check -- if one of the operands is null, the other must not be null (because
1995 // the two references are not equal), so they are not substitutable.
1996 // FIXME: do null check only if the operand is nullable
1997 __ testptr(left, right);
1998 __ jcc(Assembler::zero, L_oops_not_equal);
1999
2000 ciKlass* left_klass = op->left_klass();
2001 ciKlass* right_klass = op->right_klass();
2002
2003 // (2) Inline type check -- if either of the operands is not an inline type,
2004 // they are not substitutable. We do this only if we are not sure that the
2005 // operands are inline types.
2006 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
2007 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2008 Register tmp1 = op->tmp1()->as_register();
2009 __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2010 __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2011 __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2012 __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2013 __ jcc(Assembler::notEqual, L_oops_not_equal);
2014 }
2015
2016 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2017 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
2018 // No need to load klass -- the operands are statically known to be the same inline klass.
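// The operands are statically known to share the same inline klass, so the runtime
// klass comparison in the else-branch below is unnecessary; jump straight to the stub,
// which performs the actual substitutability check and leaves the result in rax
// (see the continuation handling further down).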
2019 __ jmp(*op->stub()->entry()); 2020 } else { 2021 Register left_klass_op = op->left_klass_op()->as_register(); 2022 Register right_klass_op = op->right_klass_op()->as_register(); 2023 2024 if (UseCompressedClassPointers) { 2025 __ movl(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2026 __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2027 __ cmpl(left_klass_op, right_klass_op); 2028 } else { 2029 __ movptr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2030 __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2031 __ cmpptr(left_klass_op, right_klass_op); 2032 } 2033 2034 __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check 2035 // fall through to L_oops_not_equal 2036 } 2037 2038 __ bind(L_oops_not_equal); 2039 move(op->not_equal_result(), op->result_opr()); 2040 __ jmp(L_end); 2041 2042 __ bind(L_oops_equal); 2043 move(op->equal_result(), op->result_opr()); 2044 __ jmp(L_end); 2045 2046 // We've returned from the stub. RAX contains 0x0 IFF the two 2047 // operands are not substitutable. (Don't compare against 0x1 in case the 2048 // C compiler is naughty) 2049 __ bind(*op->stub()->continuation()); 2050 __ cmpl(rax, 0); 2051 __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal 2052 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal 2053 // fall-through 2054 __ bind(L_end); 2055 } 2056 2057 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2058 if (LP64_ONLY(false &&) op->code() == lir_cas_long) { 2059 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 2060 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 2061 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 2062 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 2063 Register addr = op->addr()->as_register(); 2064 __ lock(); 2065 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 2066 2067 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 2068 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 2069 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2070 Register newval = op->new_value()->as_register(); 2071 Register cmpval = op->cmp_value()->as_register(); 2072 assert(cmpval == rax, "wrong register"); 2073 assert(newval != noreg, "new val must be register"); 2074 assert(cmpval != newval, "cmp and new values must be in different registers"); 2075 assert(cmpval != addr, "cmp and addr must be in different registers"); 2076 assert(newval != addr, "new value and addr must be in different registers"); 2077 2078 if ( op->code() == lir_cas_obj) { 2079 #ifdef _LP64 2080 if (UseCompressedOops) { 2081 __ encode_heap_oop(cmpval); 2082 __ mov(rscratch1, newval); 2083 __ encode_heap_oop(rscratch1); 2084 __ lock(); 2085 // cmpval (rax) is implicitly used by this instruction 2086 __ cmpxchgl(rscratch1, Address(addr, 0)); 2087 } else 2088 #endif 2089 { 2090 __ lock(); 2091 __ cmpxchgptr(newval, Address(addr, 0)); 2092 } 2093 } else { 2094 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 2095 __ lock(); 2096 __ cmpxchgl(newval, Address(addr, 0)); 2097 } 2098 #ifdef _LP64 2099 } else if (op->code() == lir_cas_long) { 2100 Register addr = (op->addr()->is_single_cpu() ? 
op->addr()->as_register() : op->addr()->as_register_lo()); 2101 Register newval = op->new_value()->as_register_lo(); 2102 Register cmpval = op->cmp_value()->as_register_lo(); 2103 assert(cmpval == rax, "wrong register"); 2104 assert(newval != noreg, "new val must be register"); 2105 assert(cmpval != newval, "cmp and new values must be in different registers"); 2106 assert(cmpval != addr, "cmp and addr must be in different registers"); 2107 assert(newval != addr, "new value and addr must be in different registers"); 2108 __ lock(); 2109 __ cmpxchgq(newval, Address(addr, 0)); 2110 #endif // _LP64 2111 } else { 2112 Unimplemented(); 2113 } 2114 } 2115 2116 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) { 2117 assert(dst->is_cpu_register(), "must be"); 2118 assert(dst->type() == src->type(), "must be"); 2119 2120 if (src->is_cpu_register()) { 2121 reg2reg(src, dst); 2122 } else if (src->is_stack()) { 2123 stack2reg(src, dst, dst->type()); 2124 } else if (src->is_constant()) { 2125 const2reg(src, dst, lir_patch_none, nullptr); 2126 } else { 2127 ShouldNotReachHere(); 2128 } 2129 } 2130 2131 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 2132 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 2133 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86"); 2134 2135 Assembler::Condition acond, ncond; 2136 switch (condition) { 2137 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 2138 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 2139 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 2140 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 2141 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 2142 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 2143 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 2144 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 2145 default: acond = Assembler::equal; ncond = Assembler::notEqual; 2146 ShouldNotReachHere(); 2147 } 2148 2149 if (opr1->is_cpu_register()) { 2150 reg2reg(opr1, result); 2151 } else if (opr1->is_stack()) { 2152 stack2reg(opr1, result, result->type()); 2153 } else if (opr1->is_constant()) { 2154 const2reg(opr1, result, lir_patch_none, nullptr); 2155 } else { 2156 ShouldNotReachHere(); 2157 } 2158 2159 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 2160 // optimized version that does not require a branch 2161 if (opr2->is_single_cpu()) { 2162 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 2163 __ cmov(ncond, result->as_register(), opr2->as_register()); 2164 } else if (opr2->is_double_cpu()) { 2165 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2166 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2167 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 2168 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 2169 } else if (opr2->is_single_stack()) { 2170 __ cmovl(ncond, result->as_register(), 
frame_map()->address_for_slot(opr2->single_stack_ix())); 2171 } else if (opr2->is_double_stack()) { 2172 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 2173 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 2174 } else { 2175 ShouldNotReachHere(); 2176 } 2177 2178 } else { 2179 Label skip; 2180 __ jccb(acond, skip); 2181 if (opr2->is_cpu_register()) { 2182 reg2reg(opr2, result); 2183 } else if (opr2->is_stack()) { 2184 stack2reg(opr2, result, result->type()); 2185 } else if (opr2->is_constant()) { 2186 const2reg(opr2, result, lir_patch_none, nullptr); 2187 } else { 2188 ShouldNotReachHere(); 2189 } 2190 __ bind(skip); 2191 } 2192 } 2193 2194 2195 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 2196 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 2197 2198 if (left->is_single_cpu()) { 2199 assert(left == dest, "left and dest must be equal"); 2200 Register lreg = left->as_register(); 2201 2202 if (right->is_single_cpu()) { 2203 // cpu register - cpu register 2204 Register rreg = right->as_register(); 2205 switch (code) { 2206 case lir_add: __ addl (lreg, rreg); break; 2207 case lir_sub: __ subl (lreg, rreg); break; 2208 case lir_mul: __ imull(lreg, rreg); break; 2209 default: ShouldNotReachHere(); 2210 } 2211 2212 } else if (right->is_stack()) { 2213 // cpu register - stack 2214 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2215 switch (code) { 2216 case lir_add: __ addl(lreg, raddr); break; 2217 case lir_sub: __ subl(lreg, raddr); break; 2218 default: ShouldNotReachHere(); 2219 } 2220 2221 } else if (right->is_constant()) { 2222 // cpu register - constant 2223 jint c = right->as_constant_ptr()->as_jint(); 2224 switch (code) { 2225 case lir_add: { 2226 __ incrementl(lreg, c); 2227 break; 2228 } 2229 case lir_sub: { 2230 __ decrementl(lreg, c); 2231 break; 2232 } 2233 default: ShouldNotReachHere(); 2234 } 2235 2236 } else { 2237 ShouldNotReachHere(); 2238 } 2239 2240 } else if (left->is_double_cpu()) { 2241 assert(left == dest, "left and dest must be equal"); 2242 Register lreg_lo = left->as_register_lo(); 2243 Register lreg_hi = left->as_register_hi(); 2244 2245 if (right->is_double_cpu()) { 2246 // cpu register - cpu register 2247 Register rreg_lo = right->as_register_lo(); 2248 Register rreg_hi = right->as_register_hi(); 2249 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 2250 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 2251 switch (code) { 2252 case lir_add: 2253 __ addptr(lreg_lo, rreg_lo); 2254 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); 2255 break; 2256 case lir_sub: 2257 __ subptr(lreg_lo, rreg_lo); 2258 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 2259 break; 2260 case lir_mul: 2261 #ifdef _LP64 2262 __ imulq(lreg_lo, rreg_lo); 2263 #else 2264 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 2265 __ imull(lreg_hi, rreg_lo); 2266 __ imull(rreg_hi, lreg_lo); 2267 __ addl (rreg_hi, lreg_hi); 2268 __ mull (rreg_lo); 2269 __ addl (lreg_hi, rreg_hi); 2270 #endif // _LP64 2271 break; 2272 default: 2273 ShouldNotReachHere(); 2274 } 2275 2276 } else if (right->is_constant()) { 2277 // cpu register - constant 2278 #ifdef _LP64 2279 jlong c = right->as_constant_ptr()->as_jlong_bits(); 2280 __ movptr(r10, (intptr_t) c); 2281 switch (code) { 2282 
case lir_add: 2283 __ addptr(lreg_lo, r10); 2284 break; 2285 case lir_sub: 2286 __ subptr(lreg_lo, r10); 2287 break; 2288 default: 2289 ShouldNotReachHere(); 2290 } 2291 #else 2292 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 2293 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 2294 switch (code) { 2295 case lir_add: 2296 __ addptr(lreg_lo, c_lo); 2297 __ adcl(lreg_hi, c_hi); 2298 break; 2299 case lir_sub: 2300 __ subptr(lreg_lo, c_lo); 2301 __ sbbl(lreg_hi, c_hi); 2302 break; 2303 default: 2304 ShouldNotReachHere(); 2305 } 2306 #endif // _LP64 2307 2308 } else { 2309 ShouldNotReachHere(); 2310 } 2311 2312 } else if (left->is_single_xmm()) { 2313 assert(left == dest, "left and dest must be equal"); 2314 XMMRegister lreg = left->as_xmm_float_reg(); 2315 2316 if (right->is_single_xmm()) { 2317 XMMRegister rreg = right->as_xmm_float_reg(); 2318 switch (code) { 2319 case lir_add: __ addss(lreg, rreg); break; 2320 case lir_sub: __ subss(lreg, rreg); break; 2321 case lir_mul: __ mulss(lreg, rreg); break; 2322 case lir_div: __ divss(lreg, rreg); break; 2323 default: ShouldNotReachHere(); 2324 } 2325 } else { 2326 Address raddr; 2327 if (right->is_single_stack()) { 2328 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2329 } else if (right->is_constant()) { 2330 // hack for now 2331 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 2332 } else { 2333 ShouldNotReachHere(); 2334 } 2335 switch (code) { 2336 case lir_add: __ addss(lreg, raddr); break; 2337 case lir_sub: __ subss(lreg, raddr); break; 2338 case lir_mul: __ mulss(lreg, raddr); break; 2339 case lir_div: __ divss(lreg, raddr); break; 2340 default: ShouldNotReachHere(); 2341 } 2342 } 2343 2344 } else if (left->is_double_xmm()) { 2345 assert(left == dest, "left and dest must be equal"); 2346 2347 XMMRegister lreg = left->as_xmm_double_reg(); 2348 if (right->is_double_xmm()) { 2349 XMMRegister rreg = right->as_xmm_double_reg(); 2350 switch (code) { 2351 case lir_add: __ addsd(lreg, rreg); break; 2352 case lir_sub: __ subsd(lreg, rreg); break; 2353 case lir_mul: __ mulsd(lreg, rreg); break; 2354 case lir_div: __ divsd(lreg, rreg); break; 2355 default: ShouldNotReachHere(); 2356 } 2357 } else { 2358 Address raddr; 2359 if (right->is_double_stack()) { 2360 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2361 } else if (right->is_constant()) { 2362 // hack for now 2363 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2364 } else { 2365 ShouldNotReachHere(); 2366 } 2367 switch (code) { 2368 case lir_add: __ addsd(lreg, raddr); break; 2369 case lir_sub: __ subsd(lreg, raddr); break; 2370 case lir_mul: __ mulsd(lreg, raddr); break; 2371 case lir_div: __ divsd(lreg, raddr); break; 2372 default: ShouldNotReachHere(); 2373 } 2374 } 2375 2376 #ifndef _LP64 2377 } else if (left->is_single_fpu()) { 2378 assert(dest->is_single_fpu(), "fpu stack allocation required"); 2379 2380 if (right->is_single_fpu()) { 2381 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); 2382 2383 } else { 2384 assert(left->fpu_regnr() == 0, "left must be on TOS"); 2385 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); 2386 2387 Address raddr; 2388 if (right->is_single_stack()) { 2389 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2390 } else if (right->is_constant()) { 2391 address const_addr = float_constant(right->as_jfloat()); 2392 assert(const_addr != nullptr, "incorrect float/double constant 
maintenance"); 2393 // hack for now 2394 raddr = __ as_Address(InternalAddress(const_addr)); 2395 } else { 2396 ShouldNotReachHere(); 2397 } 2398 2399 switch (code) { 2400 case lir_add: __ fadd_s(raddr); break; 2401 case lir_sub: __ fsub_s(raddr); break; 2402 case lir_mul: __ fmul_s(raddr); break; 2403 case lir_div: __ fdiv_s(raddr); break; 2404 default: ShouldNotReachHere(); 2405 } 2406 } 2407 2408 } else if (left->is_double_fpu()) { 2409 assert(dest->is_double_fpu(), "fpu stack allocation required"); 2410 2411 if (code == lir_mul || code == lir_div) { 2412 // Double values require special handling for strictfp mul/div on x86 2413 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1())); 2414 __ fmulp(left->fpu_regnrLo() + 1); 2415 } 2416 2417 if (right->is_double_fpu()) { 2418 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); 2419 2420 } else { 2421 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); 2422 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); 2423 2424 Address raddr; 2425 if (right->is_double_stack()) { 2426 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2427 } else if (right->is_constant()) { 2428 // hack for now 2429 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2430 } else { 2431 ShouldNotReachHere(); 2432 } 2433 2434 switch (code) { 2435 case lir_add: __ fadd_d(raddr); break; 2436 case lir_sub: __ fsub_d(raddr); break; 2437 case lir_mul: __ fmul_d(raddr); break; 2438 case lir_div: __ fdiv_d(raddr); break; 2439 default: ShouldNotReachHere(); 2440 } 2441 } 2442 2443 if (code == lir_mul || code == lir_div) { 2444 // Double values require special handling for strictfp mul/div on x86 2445 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2())); 2446 __ fmulp(dest->fpu_regnrLo() + 1); 2447 } 2448 #endif // !_LP64 2449 2450 } else if (left->is_single_stack() || left->is_address()) { 2451 assert(left == dest, "left and dest must be equal"); 2452 2453 Address laddr; 2454 if (left->is_single_stack()) { 2455 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2456 } else if (left->is_address()) { 2457 laddr = as_Address(left->as_address_ptr()); 2458 } else { 2459 ShouldNotReachHere(); 2460 } 2461 2462 if (right->is_single_cpu()) { 2463 Register rreg = right->as_register(); 2464 switch (code) { 2465 case lir_add: __ addl(laddr, rreg); break; 2466 case lir_sub: __ subl(laddr, rreg); break; 2467 default: ShouldNotReachHere(); 2468 } 2469 } else if (right->is_constant()) { 2470 jint c = right->as_constant_ptr()->as_jint(); 2471 switch (code) { 2472 case lir_add: { 2473 __ incrementl(laddr, c); 2474 break; 2475 } 2476 case lir_sub: { 2477 __ decrementl(laddr, c); 2478 break; 2479 } 2480 default: ShouldNotReachHere(); 2481 } 2482 } else { 2483 ShouldNotReachHere(); 2484 } 2485 2486 } else { 2487 ShouldNotReachHere(); 2488 } 2489 } 2490 2491 #ifndef _LP64 2492 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { 2493 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); 2494 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); 2495 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); 2496 2497 bool left_is_tos = (left_index == 0); 2498 bool dest_is_tos = (dest_index == 0); 2499 int non_tos_index = (left_is_tos ? 
right_index : left_index); 2500 2501 switch (code) { 2502 case lir_add: 2503 if (pop_fpu_stack) __ faddp(non_tos_index); 2504 else if (dest_is_tos) __ fadd (non_tos_index); 2505 else __ fadda(non_tos_index); 2506 break; 2507 2508 case lir_sub: 2509 if (left_is_tos) { 2510 if (pop_fpu_stack) __ fsubrp(non_tos_index); 2511 else if (dest_is_tos) __ fsub (non_tos_index); 2512 else __ fsubra(non_tos_index); 2513 } else { 2514 if (pop_fpu_stack) __ fsubp (non_tos_index); 2515 else if (dest_is_tos) __ fsubr (non_tos_index); 2516 else __ fsuba (non_tos_index); 2517 } 2518 break; 2519 2520 case lir_mul: 2521 if (pop_fpu_stack) __ fmulp(non_tos_index); 2522 else if (dest_is_tos) __ fmul (non_tos_index); 2523 else __ fmula(non_tos_index); 2524 break; 2525 2526 case lir_div: 2527 if (left_is_tos) { 2528 if (pop_fpu_stack) __ fdivrp(non_tos_index); 2529 else if (dest_is_tos) __ fdiv (non_tos_index); 2530 else __ fdivra(non_tos_index); 2531 } else { 2532 if (pop_fpu_stack) __ fdivp (non_tos_index); 2533 else if (dest_is_tos) __ fdivr (non_tos_index); 2534 else __ fdiva (non_tos_index); 2535 } 2536 break; 2537 2538 case lir_rem: 2539 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); 2540 __ fremr(noreg); 2541 break; 2542 2543 default: 2544 ShouldNotReachHere(); 2545 } 2546 } 2547 #endif // _LP64 2548 2549 2550 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) { 2551 if (value->is_double_xmm()) { 2552 switch(code) { 2553 case lir_abs : 2554 { 2555 #ifdef _LP64 2556 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 2557 assert(tmp->is_valid(), "need temporary"); 2558 __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2); 2559 } else 2560 #endif 2561 { 2562 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2563 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2564 } 2565 assert(!tmp->is_valid(), "do not need temporary"); 2566 __ andpd(dest->as_xmm_double_reg(), 2567 ExternalAddress((address)double_signmask_pool), 2568 rscratch1); 2569 } 2570 } 2571 break; 2572 2573 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2574 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2575 default : ShouldNotReachHere(); 2576 } 2577 2578 #ifndef _LP64 2579 } else if (value->is_double_fpu()) { 2580 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); 2581 switch(code) { 2582 case lir_abs : __ fabs() ; break; 2583 case lir_sqrt : __ fsqrt(); break; 2584 default : ShouldNotReachHere(); 2585 } 2586 #endif // !_LP64 2587 } else if (code == lir_f2hf) { 2588 __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 2589 } else if (code == lir_hf2f) { 2590 __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register()); 2591 } else { 2592 Unimplemented(); 2593 } 2594 } 2595 2596 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2597 // assert(left->destroys_register(), "check"); 2598 if (left->is_single_cpu()) { 2599 Register reg = left->as_register(); 2600 if (right->is_constant()) { 2601 int val = right->as_constant_ptr()->as_jint(); 2602 switch (code) { 2603 case lir_logic_and: __ andl (reg, val); break; 2604 case lir_logic_or: __ orl (reg, val); break; 2605 case lir_logic_xor: __ xorl (reg, val); break; 2606 default: ShouldNotReachHere(); 2607 } 2608 } else if (right->is_stack()) 
{ 2609 // added support for stack operands 2610 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2611 switch (code) { 2612 case lir_logic_and: __ andl (reg, raddr); break; 2613 case lir_logic_or: __ orl (reg, raddr); break; 2614 case lir_logic_xor: __ xorl (reg, raddr); break; 2615 default: ShouldNotReachHere(); 2616 } 2617 } else { 2618 Register rright = right->as_register(); 2619 switch (code) { 2620 case lir_logic_and: __ andptr (reg, rright); break; 2621 case lir_logic_or : __ orptr (reg, rright); break; 2622 case lir_logic_xor: __ xorptr (reg, rright); break; 2623 default: ShouldNotReachHere(); 2624 } 2625 } 2626 move_regs(reg, dst->as_register()); 2627 } else { 2628 Register l_lo = left->as_register_lo(); 2629 Register l_hi = left->as_register_hi(); 2630 if (right->is_constant()) { 2631 #ifdef _LP64 2632 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2633 switch (code) { 2634 case lir_logic_and: 2635 __ andq(l_lo, rscratch1); 2636 break; 2637 case lir_logic_or: 2638 __ orq(l_lo, rscratch1); 2639 break; 2640 case lir_logic_xor: 2641 __ xorq(l_lo, rscratch1); 2642 break; 2643 default: ShouldNotReachHere(); 2644 } 2645 #else 2646 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2647 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2648 switch (code) { 2649 case lir_logic_and: 2650 __ andl(l_lo, r_lo); 2651 __ andl(l_hi, r_hi); 2652 break; 2653 case lir_logic_or: 2654 __ orl(l_lo, r_lo); 2655 __ orl(l_hi, r_hi); 2656 break; 2657 case lir_logic_xor: 2658 __ xorl(l_lo, r_lo); 2659 __ xorl(l_hi, r_hi); 2660 break; 2661 default: ShouldNotReachHere(); 2662 } 2663 #endif // _LP64 2664 } else { 2665 #ifdef _LP64 2666 Register r_lo; 2667 if (is_reference_type(right->type())) { 2668 r_lo = right->as_register(); 2669 } else { 2670 r_lo = right->as_register_lo(); 2671 } 2672 #else 2673 Register r_lo = right->as_register_lo(); 2674 Register r_hi = right->as_register_hi(); 2675 assert(l_lo != r_hi, "overwriting registers"); 2676 #endif 2677 switch (code) { 2678 case lir_logic_and: 2679 __ andptr(l_lo, r_lo); 2680 NOT_LP64(__ andptr(l_hi, r_hi);) 2681 break; 2682 case lir_logic_or: 2683 __ orptr(l_lo, r_lo); 2684 NOT_LP64(__ orptr(l_hi, r_hi);) 2685 break; 2686 case lir_logic_xor: 2687 __ xorptr(l_lo, r_lo); 2688 NOT_LP64(__ xorptr(l_hi, r_hi);) 2689 break; 2690 default: ShouldNotReachHere(); 2691 } 2692 } 2693 2694 Register dst_lo = dst->as_register_lo(); 2695 Register dst_hi = dst->as_register_hi(); 2696 2697 #ifdef _LP64 2698 move_regs(l_lo, dst_lo); 2699 #else 2700 if (dst_lo == l_hi) { 2701 assert(dst_hi != l_lo, "overwriting registers"); 2702 move_regs(l_hi, dst_hi); 2703 move_regs(l_lo, dst_lo); 2704 } else { 2705 assert(dst_lo != l_hi, "overwriting registers"); 2706 move_regs(l_lo, dst_lo); 2707 move_regs(l_hi, dst_hi); 2708 } 2709 #endif // _LP64 2710 } 2711 } 2712 2713 2714 // we assume that rax, and rdx can be overwritten 2715 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2716 2717 assert(left->is_single_cpu(), "left must be register"); 2718 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2719 assert(result->is_single_cpu(), "result must be register"); 2720 2721 // assert(left->destroys_register(), "check"); 2722 // assert(right->destroys_register(), "check"); 2723 2724 Register lreg = left->as_register(); 2725 Register dreg = result->as_register(); 2726 2727 if (right->is_constant()) { 2728 jint divisor = 
right->as_constant_ptr()->as_jint(); 2729 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2730 if (code == lir_idiv) { 2731 assert(lreg == rax, "must be rax,"); 2732 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2733 __ cdql(); // sign extend into rdx:rax 2734 if (divisor == 2) { 2735 __ subl(lreg, rdx); 2736 } else { 2737 __ andl(rdx, divisor - 1); 2738 __ addl(lreg, rdx); 2739 } 2740 __ sarl(lreg, log2i_exact(divisor)); 2741 move_regs(lreg, dreg); 2742 } else if (code == lir_irem) { 2743 Label done; 2744 __ mov(dreg, lreg); 2745 __ andl(dreg, 0x80000000 | (divisor - 1)); 2746 __ jcc(Assembler::positive, done); 2747 __ decrement(dreg); 2748 __ orl(dreg, ~(divisor - 1)); 2749 __ increment(dreg); 2750 __ bind(done); 2751 } else { 2752 ShouldNotReachHere(); 2753 } 2754 } else { 2755 Register rreg = right->as_register(); 2756 assert(lreg == rax, "left register must be rax,"); 2757 assert(rreg != rdx, "right register must not be rdx"); 2758 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2759 2760 move_regs(lreg, rax); 2761 2762 int idivl_offset = __ corrected_idivl(rreg); 2763 if (ImplicitDiv0Checks) { 2764 add_debug_info_for_div0(idivl_offset, info); 2765 } 2766 if (code == lir_irem) { 2767 move_regs(rdx, dreg); // result is in rdx 2768 } else { 2769 move_regs(rax, dreg); 2770 } 2771 } 2772 } 2773 2774 2775 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2776 if (opr1->is_single_cpu()) { 2777 Register reg1 = opr1->as_register(); 2778 if (opr2->is_single_cpu()) { 2779 // cpu register - cpu register 2780 if (is_reference_type(opr1->type())) { 2781 __ cmpoop(reg1, opr2->as_register()); 2782 } else { 2783 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 2784 __ cmpl(reg1, opr2->as_register()); 2785 } 2786 } else if (opr2->is_stack()) { 2787 // cpu register - stack 2788 if (is_reference_type(opr1->type())) { 2789 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2790 } else { 2791 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2792 } 2793 } else if (opr2->is_constant()) { 2794 // cpu register - constant 2795 LIR_Const* c = opr2->as_constant_ptr(); 2796 if (c->type() == T_INT) { 2797 jint i = c->as_jint(); 2798 if (i == 0) { 2799 __ testl(reg1, reg1); 2800 } else { 2801 __ cmpl(reg1, i); 2802 } 2803 } else if (c->type() == T_METADATA) { 2804 // All we need for now is a comparison with null for equality. 
2805 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 2806 Metadata* m = c->as_metadata(); 2807 if (m == nullptr) { 2808 __ testptr(reg1, reg1); 2809 } else { 2810 ShouldNotReachHere(); 2811 } 2812 } else if (is_reference_type(c->type())) { 2813 // In 64bit oops are single register 2814 jobject o = c->as_jobject(); 2815 if (o == nullptr) { 2816 __ testptr(reg1, reg1); 2817 } else { 2818 __ cmpoop(reg1, o, rscratch1); 2819 } 2820 } else { 2821 fatal("unexpected type: %s", basictype_to_str(c->type())); 2822 } 2823 // cpu register - address 2824 } else if (opr2->is_address()) { 2825 if (op->info() != nullptr) { 2826 add_debug_info_for_null_check_here(op->info()); 2827 } 2828 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2829 } else { 2830 ShouldNotReachHere(); 2831 } 2832 2833 } else if(opr1->is_double_cpu()) { 2834 Register xlo = opr1->as_register_lo(); 2835 Register xhi = opr1->as_register_hi(); 2836 if (opr2->is_double_cpu()) { 2837 #ifdef _LP64 2838 __ cmpptr(xlo, opr2->as_register_lo()); 2839 #else 2840 // cpu register - cpu register 2841 Register ylo = opr2->as_register_lo(); 2842 Register yhi = opr2->as_register_hi(); 2843 __ subl(xlo, ylo); 2844 __ sbbl(xhi, yhi); 2845 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2846 __ orl(xhi, xlo); 2847 } 2848 #endif // _LP64 2849 } else if (opr2->is_constant()) { 2850 // cpu register - constant 0 2851 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2852 #ifdef _LP64 2853 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2854 #else 2855 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2856 __ orl(xhi, xlo); 2857 #endif // _LP64 2858 } else { 2859 ShouldNotReachHere(); 2860 } 2861 2862 } else if (opr1->is_single_xmm()) { 2863 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2864 if (opr2->is_single_xmm()) { 2865 // xmm register - xmm register 2866 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2867 } else if (opr2->is_stack()) { 2868 // xmm register - stack 2869 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2870 } else if (opr2->is_constant()) { 2871 // xmm register - constant 2872 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2873 } else if (opr2->is_address()) { 2874 // xmm register - address 2875 if (op->info() != nullptr) { 2876 add_debug_info_for_null_check_here(op->info()); 2877 } 2878 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2879 } else { 2880 ShouldNotReachHere(); 2881 } 2882 2883 } else if (opr1->is_double_xmm()) { 2884 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2885 if (opr2->is_double_xmm()) { 2886 // xmm register - xmm register 2887 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2888 } else if (opr2->is_stack()) { 2889 // xmm register - stack 2890 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2891 } else if (opr2->is_constant()) { 2892 // xmm register - constant 2893 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2894 } else if (opr2->is_address()) { 2895 // xmm register - address 2896 if (op->info() != nullptr) { 2897 add_debug_info_for_null_check_here(op->info()); 2898 } 2899 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2900 } else { 2901 ShouldNotReachHere(); 2902 } 2903 2904 #ifndef _LP64 2905 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { 2906 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)"); 2907 
assert(opr2->is_fpu_register(), "both must be registers"); 2908 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2909 #endif // LP64 2910 2911 } else if (opr1->is_address() && opr2->is_constant()) { 2912 LIR_Const* c = opr2->as_constant_ptr(); 2913 #ifdef _LP64 2914 if (is_reference_type(c->type())) { 2915 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2916 __ movoop(rscratch1, c->as_jobject()); 2917 } 2918 #endif // LP64 2919 if (op->info() != nullptr) { 2920 add_debug_info_for_null_check_here(op->info()); 2921 } 2922 // special case: address - constant 2923 LIR_Address* addr = opr1->as_address_ptr(); 2924 if (c->type() == T_INT) { 2925 __ cmpl(as_Address(addr), c->as_jint()); 2926 } else if (is_reference_type(c->type())) { 2927 #ifdef _LP64 2928 // %%% Make this explode if addr isn't reachable until we figure out a 2929 // better strategy by giving noreg as the temp for as_Address 2930 __ cmpoop(rscratch1, as_Address(addr, noreg)); 2931 #else 2932 __ cmpoop(as_Address(addr), c->as_jobject()); 2933 #endif // _LP64 2934 } else { 2935 ShouldNotReachHere(); 2936 } 2937 2938 } else { 2939 ShouldNotReachHere(); 2940 } 2941 } 2942 2943 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2944 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2945 if (left->is_single_xmm()) { 2946 assert(right->is_single_xmm(), "must match"); 2947 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2948 } else if (left->is_double_xmm()) { 2949 assert(right->is_double_xmm(), "must match"); 2950 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2951 2952 } else { 2953 #ifdef _LP64 2954 ShouldNotReachHere(); 2955 #else 2956 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); 2957 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); 2958 2959 assert(left->fpu() == 0, "left must be on TOS"); 2960 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), 2961 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2962 #endif // LP64 2963 } 2964 } else { 2965 assert(code == lir_cmp_l2i, "check"); 2966 #ifdef _LP64 2967 Label done; 2968 Register dest = dst->as_register(); 2969 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 2970 __ movl(dest, -1); 2971 __ jccb(Assembler::less, done); 2972 __ setb(Assembler::notZero, dest); 2973 __ movzbl(dest, dest); 2974 __ bind(done); 2975 #else 2976 __ lcmp2int(left->as_register_hi(), 2977 left->as_register_lo(), 2978 right->as_register_hi(), 2979 right->as_register_lo()); 2980 move_regs(left->as_register_hi(), dst->as_register()); 2981 #endif // _LP64 2982 } 2983 } 2984 2985 2986 void LIR_Assembler::align_call(LIR_Code code) { 2987 // make sure that the displacement word of the call ends up word aligned 2988 int offset = __ offset(); 2989 switch (code) { 2990 case lir_static_call: 2991 case lir_optvirtual_call: 2992 case lir_dynamic_call: 2993 offset += NativeCall::displacement_offset; 2994 break; 2995 case lir_icvirtual_call: 2996 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex; 2997 break; 2998 default: ShouldNotReachHere(); 2999 } 3000 __ align(BytesPerWord, offset); 3001 } 3002 3003 3004 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 3005 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 3006 "must 
be aligned"); 3007 __ call(AddressLiteral(op->addr(), rtype)); 3008 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3009 __ post_call_nop(); 3010 } 3011 3012 3013 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 3014 __ ic_call(op->addr()); 3015 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3016 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0, 3017 "must be aligned"); 3018 __ post_call_nop(); 3019 } 3020 3021 3022 void LIR_Assembler::emit_static_call_stub() { 3023 address call_pc = __ pc(); 3024 address stub = __ start_a_stub(call_stub_size()); 3025 if (stub == nullptr) { 3026 bailout("static call stub overflow"); 3027 return; 3028 } 3029 3030 int start = __ offset(); 3031 3032 // make sure that the displacement word of the call ends up word aligned 3033 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset); 3034 __ relocate(static_stub_Relocation::spec(call_pc)); 3035 __ mov_metadata(rbx, (Metadata*)nullptr); 3036 // must be set to -1 at code generation time 3037 assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned"); 3038 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 3039 __ jump(RuntimeAddress(__ pc())); 3040 3041 assert(__ offset() - start <= call_stub_size(), "stub too big"); 3042 __ end_a_stub(); 3043 } 3044 3045 3046 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 3047 assert(exceptionOop->as_register() == rax, "must match"); 3048 assert(exceptionPC->as_register() == rdx, "must match"); 3049 3050 // exception object is not added to oop map by LinearScan 3051 // (LinearScan assumes that no oops are in fixed registers) 3052 info->add_register_oop(exceptionOop); 3053 C1StubId unwind_id; 3054 3055 // get current pc information 3056 // pc is only needed if the method has an exception handler, the unwind code does not need it. 
3057 int pc_for_athrow_offset = __ offset(); 3058 InternalAddress pc_for_athrow(__ pc()); 3059 __ lea(exceptionPC->as_register(), pc_for_athrow); 3060 add_call_info(pc_for_athrow_offset, info); // for exception handler 3061 3062 __ verify_not_null_oop(rax); 3063 // search an exception handler (rax: exception oop, rdx: throwing pc) 3064 if (compilation()->has_fpu_code()) { 3065 unwind_id = C1StubId::handle_exception_id; 3066 } else { 3067 unwind_id = C1StubId::handle_exception_nofpu_id; 3068 } 3069 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 3070 3071 // enough room for two byte trap 3072 __ nop(); 3073 } 3074 3075 3076 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 3077 assert(exceptionOop->as_register() == rax, "must match"); 3078 3079 __ jmp(_unwind_handler_entry); 3080 } 3081 3082 3083 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 3084 3085 // optimized version for linear scan: 3086 // * count must be already in ECX (guaranteed by LinearScan) 3087 // * left and dest must be equal 3088 // * tmp must be unused 3089 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 3090 assert(left == dest, "left and dest must be equal"); 3091 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 3092 3093 if (left->is_single_cpu()) { 3094 Register value = left->as_register(); 3095 assert(value != SHIFT_count, "left cannot be ECX"); 3096 3097 switch (code) { 3098 case lir_shl: __ shll(value); break; 3099 case lir_shr: __ sarl(value); break; 3100 case lir_ushr: __ shrl(value); break; 3101 default: ShouldNotReachHere(); 3102 } 3103 } else if (left->is_double_cpu()) { 3104 Register lo = left->as_register_lo(); 3105 Register hi = left->as_register_hi(); 3106 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 3107 #ifdef _LP64 3108 switch (code) { 3109 case lir_shl: __ shlptr(lo); break; 3110 case lir_shr: __ sarptr(lo); break; 3111 case lir_ushr: __ shrptr(lo); break; 3112 default: ShouldNotReachHere(); 3113 } 3114 #else 3115 3116 switch (code) { 3117 case lir_shl: __ lshl(hi, lo); break; 3118 case lir_shr: __ lshr(hi, lo, true); break; 3119 case lir_ushr: __ lshr(hi, lo, false); break; 3120 default: ShouldNotReachHere(); 3121 } 3122 #endif // LP64 3123 } else { 3124 ShouldNotReachHere(); 3125 } 3126 } 3127 3128 3129 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 3130 if (dest->is_single_cpu()) { 3131 // first move left into dest so that left is not destroyed by the shift 3132 Register value = dest->as_register(); 3133 count = count & 0x1F; // Java spec 3134 3135 move_regs(left->as_register(), value); 3136 switch (code) { 3137 case lir_shl: __ shll(value, count); break; 3138 case lir_shr: __ sarl(value, count); break; 3139 case lir_ushr: __ shrl(value, count); break; 3140 default: ShouldNotReachHere(); 3141 } 3142 } else if (dest->is_double_cpu()) { 3143 #ifndef _LP64 3144 Unimplemented(); 3145 #else 3146 // first move left into dest so that left is not destroyed by the shift 3147 Register value = dest->as_register_lo(); 3148 count = count & 0x1F; // Java spec 3149 3150 move_regs(left->as_register_lo(), value); 3151 switch (code) { 3152 case lir_shl: __ shlptr(value, count); break; 3153 case lir_shr: __ sarptr(value, count); break; 3154 case lir_ushr: __ shrptr(value, count); break; 3155 default: ShouldNotReachHere(); 3156 } 3157 #endif // _LP64 3158 } else { 3159 ShouldNotReachHere(); 3160 } 3161 } 3162 3163 3164 void 
LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 3165 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3166 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3167 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3168 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); 3169 } 3170 3171 3172 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 3173 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3174 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3175 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3176 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); 3177 } 3178 3179 3180 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 3181 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3182 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3183 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3184 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1); 3185 } 3186 3187 3188 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { 3189 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3190 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3191 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3192 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1); 3193 } 3194 3195 3196 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) { 3197 if (null_check) { 3198 __ testptr(obj, obj); 3199 __ jcc(Assembler::zero, *slow_path->entry()); 3200 } 3201 if (is_dest) { 3202 __ test_null_free_array_oop(obj, tmp, *slow_path->entry()); 3203 } else { 3204 __ test_flat_array_oop(obj, tmp, *slow_path->entry()); 3205 } 3206 } 3207 3208 3209 // This code replaces a call to arraycopy; no exception may 3210 // be thrown in this code, they must be thrown in the System.arraycopy 3211 // activation frame; we could save some checks if this would not be the case 3212 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 3213 ciArrayKlass* default_type = op->expected_type(); 3214 Register src = op->src()->as_register(); 3215 Register dst = op->dst()->as_register(); 3216 Register src_pos = op->src_pos()->as_register(); 3217 Register dst_pos = op->dst_pos()->as_register(); 3218 Register length = op->length()->as_register(); 3219 Register tmp = op->tmp()->as_register(); 3220 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3221 3222 CodeStub* stub = op->stub(); 3223 int flags = op->flags(); 3224 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 3225 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3226 3227 if (flags & LIR_OpArrayCopy::always_slow_path) { 3228 __ jmp(*stub->entry()); 3229 __ bind(*stub->continuation()); 3230 return; 3231 } 3232 3233 // if we don't know anything, just go through the generic arraycopy 3234 if (default_type == nullptr) { 3235 // save outgoing arguments on stack in case call to System.arraycopy is needed 3236 // HACK ALERT. This code used to push the parameters in a hardwired fashion 3237 // for interpreter calling conventions. Now we have to do it in new style conventions. 
3238 // For the moment until C1 gets the new register allocator I just force all the 3239 // args to the right place (except the register args) and then on the back side 3240 // reload the register args properly if we go slow path. Yuck 3241 3242 // These are proper for the calling convention 3243 store_parameter(length, 2); 3244 store_parameter(dst_pos, 1); 3245 store_parameter(dst, 0); 3246 3247 // these are just temporary placements until we need to reload 3248 store_parameter(src_pos, 3); 3249 store_parameter(src, 4); 3250 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) 3251 3252 address copyfunc_addr = StubRoutines::generic_arraycopy(); 3253 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 3254 3255 // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint 3256 #ifdef _LP64 3257 // The arguments are in java calling convention so we can trivially shift them to C 3258 // convention 3259 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 3260 __ mov(c_rarg0, j_rarg0); 3261 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 3262 __ mov(c_rarg1, j_rarg1); 3263 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); 3264 __ mov(c_rarg2, j_rarg2); 3265 assert_different_registers(c_rarg3, j_rarg4); 3266 __ mov(c_rarg3, j_rarg3); 3267 #ifdef _WIN64 3268 // Allocate abi space for args but be sure to keep stack aligned 3269 __ subptr(rsp, 6*wordSize); 3270 store_parameter(j_rarg4, 4); 3271 #ifndef PRODUCT 3272 if (PrintC1Statistics) { 3273 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3274 } 3275 #endif 3276 __ call(RuntimeAddress(copyfunc_addr)); 3277 __ addptr(rsp, 6*wordSize); 3278 #else 3279 __ mov(c_rarg4, j_rarg4); 3280 #ifndef PRODUCT 3281 if (PrintC1Statistics) { 3282 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3283 } 3284 #endif 3285 __ call(RuntimeAddress(copyfunc_addr)); 3286 #endif // _WIN64 3287 #else 3288 __ push(length); 3289 __ push(dst_pos); 3290 __ push(dst); 3291 __ push(src_pos); 3292 __ push(src); 3293 3294 #ifndef PRODUCT 3295 if (PrintC1Statistics) { 3296 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3297 } 3298 #endif 3299 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack 3300 3301 #endif // _LP64 3302 3303 __ testl(rax, rax); 3304 __ jcc(Assembler::equal, *stub->continuation()); 3305 3306 __ mov(tmp, rax); 3307 __ xorl(tmp, -1); 3308 3309 // Reload values from the stack so they are where the stub 3310 // expects them. 
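// Note: the generic arraycopy stub returns 0 on success, or the bitwise complement of
// the number of elements it managed to copy before bailing out. tmp now holds that
// partial count (from the xorl above); e.g. a return value of ~3 means 3 elements were
// copied, so below we advance src_pos/dst_pos by 3, shrink length by 3, and retry the
// rest via the slow-path stub.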
3311 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3312 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3313 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3314 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3315 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3316 3317 __ subl(length, tmp); 3318 __ addl(src_pos, tmp); 3319 __ addl(dst_pos, tmp); 3320 __ jmp(*stub->entry()); 3321 3322 __ bind(*stub->continuation()); 3323 return; 3324 } 3325 3326 // Handle inline type arrays 3327 if (flags & LIR_OpArrayCopy::src_inlinetype_check) { 3328 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check)); 3329 } 3330 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) { 3331 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check)); 3332 } 3333 3334 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 3335 3336 int elem_size = type2aelembytes(basic_type); 3337 Address::ScaleFactor scale; 3338 3339 switch (elem_size) { 3340 case 1 : 3341 scale = Address::times_1; 3342 break; 3343 case 2 : 3344 scale = Address::times_2; 3345 break; 3346 case 4 : 3347 scale = Address::times_4; 3348 break; 3349 case 8 : 3350 scale = Address::times_8; 3351 break; 3352 default: 3353 scale = Address::no_scale; 3354 ShouldNotReachHere(); 3355 } 3356 3357 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 3358 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 3359 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 3360 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 3361 3362 // length and pos's are all sign extended at this point on 64bit 3363 3364 // test for null 3365 if (flags & LIR_OpArrayCopy::src_null_check) { 3366 __ testptr(src, src); 3367 __ jcc(Assembler::zero, *stub->entry()); 3368 } 3369 if (flags & LIR_OpArrayCopy::dst_null_check) { 3370 __ testptr(dst, dst); 3371 __ jcc(Assembler::zero, *stub->entry()); 3372 } 3373 3374 // If the compiler was not able to prove that exact type of the source or the destination 3375 // of the arraycopy is an array type, check at runtime if the source or the destination is 3376 // an instance type. 
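// Note on the test below: Klass::layout_helper() is negative for array klasses (the
// array tag lives in the high bits) and is >= _lh_neutral_value (0) for instance
// klasses, so the greaterEqual branch sends any non-array operand to the slow-path stub.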
3377 if (flags & LIR_OpArrayCopy::type_check) { 3378 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3379 __ load_klass(tmp, dst, tmp_load_klass); 3380 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 3381 __ jcc(Assembler::greaterEqual, *stub->entry()); 3382 } 3383 3384 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3385 __ load_klass(tmp, src, tmp_load_klass); 3386 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 3387 __ jcc(Assembler::greaterEqual, *stub->entry()); 3388 } 3389 } 3390 3391 // check if negative 3392 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 3393 __ testl(src_pos, src_pos); 3394 __ jcc(Assembler::less, *stub->entry()); 3395 } 3396 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 3397 __ testl(dst_pos, dst_pos); 3398 __ jcc(Assembler::less, *stub->entry()); 3399 } 3400 3401 if (flags & LIR_OpArrayCopy::src_range_check) { 3402 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); 3403 __ cmpl(tmp, src_length_addr); 3404 __ jcc(Assembler::above, *stub->entry()); 3405 } 3406 if (flags & LIR_OpArrayCopy::dst_range_check) { 3407 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); 3408 __ cmpl(tmp, dst_length_addr); 3409 __ jcc(Assembler::above, *stub->entry()); 3410 } 3411 3412 if (flags & LIR_OpArrayCopy::length_positive_check) { 3413 __ testl(length, length); 3414 __ jcc(Assembler::less, *stub->entry()); 3415 } 3416 3417 #ifdef _LP64 3418 __ movl2ptr(src_pos, src_pos); //higher 32bits must be null 3419 __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null 3420 #endif 3421 3422 if (flags & LIR_OpArrayCopy::type_check) { 3423 // We don't know the array types are compatible 3424 if (basic_type != T_OBJECT) { 3425 // Simple test for basic type arrays 3426 if (UseCompressedClassPointers) { 3427 __ movl(tmp, src_klass_addr); 3428 __ cmpl(tmp, dst_klass_addr); 3429 } else { 3430 __ movptr(tmp, src_klass_addr); 3431 __ cmpptr(tmp, dst_klass_addr); 3432 } 3433 __ jcc(Assembler::notEqual, *stub->entry()); 3434 } else { 3435 // For object arrays, if src is a sub class of dst then we can 3436 // safely do the copy. 3437 Label cont, slow; 3438 3439 __ push(src); 3440 __ push(dst); 3441 3442 __ load_klass(src, src, tmp_load_klass); 3443 __ load_klass(dst, dst, tmp_load_klass); 3444 3445 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); 3446 3447 __ push(src); 3448 __ push(dst); 3449 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 3450 __ pop(dst); 3451 __ pop(src); 3452 3453 __ testl(src, src); 3454 __ jcc(Assembler::notEqual, cont); 3455 3456 __ bind(slow); 3457 __ pop(dst); 3458 __ pop(src); 3459 3460 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 3461 if (copyfunc_addr != nullptr) { // use stub if available 3462 // src is not a sub class of dst so we have to do a 3463 // per-element check. 3464 3465 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 3466 if ((flags & mask) != mask) { 3467 // Check that at least both of them object arrays. 
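// (That is: exactly one of src/dst is statically known to be an object array
// here; the klass of the other one is loaded below and its layout_helper is
// compared against the objArray layout to check it at runtime.)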
3468 assert(flags & mask, "one of the two should be known to be an object array"); 3469 3470 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3471 __ load_klass(tmp, src, tmp_load_klass); 3472 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3473 __ load_klass(tmp, dst, tmp_load_klass); 3474 } 3475 int lh_offset = in_bytes(Klass::layout_helper_offset()); 3476 Address klass_lh_addr(tmp, lh_offset); 3477 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 3478 __ cmpl(klass_lh_addr, objArray_lh); 3479 __ jcc(Assembler::notEqual, *stub->entry()); 3480 } 3481 3482 // Spill because stubs can use any register they like and it's 3483 // easier to restore just those that we care about. 3484 store_parameter(dst, 0); 3485 store_parameter(dst_pos, 1); 3486 store_parameter(length, 2); 3487 store_parameter(src_pos, 3); 3488 store_parameter(src, 4); 3489 3490 #ifndef _LP64 3491 __ movptr(tmp, dst_klass_addr); 3492 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); 3493 __ push(tmp); 3494 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset())); 3495 __ push(tmp); 3496 __ push(length); 3497 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3498 __ push(tmp); 3499 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3500 __ push(tmp); 3501 3502 __ call_VM_leaf(copyfunc_addr, 5); 3503 #else 3504 __ movl2ptr(length, length); //higher 32bits must be null 3505 3506 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3507 assert_different_registers(c_rarg0, dst, dst_pos, length); 3508 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3509 assert_different_registers(c_rarg1, dst, length); 3510 3511 __ mov(c_rarg2, length); 3512 assert_different_registers(c_rarg2, dst); 3513 3514 #ifdef _WIN64 3515 // Allocate abi space for args but be sure to keep stack aligned 3516 __ subptr(rsp, 6*wordSize); 3517 __ load_klass(c_rarg3, dst, tmp_load_klass); 3518 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); 3519 store_parameter(c_rarg3, 4); 3520 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); 3521 __ call(RuntimeAddress(copyfunc_addr)); 3522 __ addptr(rsp, 6*wordSize); 3523 #else 3524 __ load_klass(c_rarg4, dst, tmp_load_klass); 3525 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 3526 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 3527 __ call(RuntimeAddress(copyfunc_addr)); 3528 #endif 3529 3530 #endif 3531 3532 #ifndef PRODUCT 3533 if (PrintC1Statistics) { 3534 Label failed; 3535 __ testl(rax, rax); 3536 __ jcc(Assembler::notZero, failed); 3537 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1); 3538 __ bind(failed); 3539 } 3540 #endif 3541 3542 __ testl(rax, rax); 3543 __ jcc(Assembler::zero, *stub->continuation()); 3544 3545 #ifndef PRODUCT 3546 if (PrintC1Statistics) { 3547 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1); 3548 } 3549 #endif 3550 3551 __ mov(tmp, rax); 3552 3553 __ xorl(tmp, -1); 3554 3555 // Restore previously spilled arguments 3556 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3557 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3558 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3559 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3560 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3561 3562 3563 __ 
subl(length, tmp); 3564 __ addl(src_pos, tmp); 3565 __ addl(dst_pos, tmp); 3566 } 3567 3568 __ jmp(*stub->entry()); 3569 3570 __ bind(cont); 3571 __ pop(dst); 3572 __ pop(src); 3573 } 3574 } 3575 3576 #ifdef ASSERT 3577 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 3578 // Sanity check the known type with the incoming class. For the 3579 // primitive case the types must match exactly with src.klass and 3580 // dst.klass each exactly matching the default type. For the 3581 // object array case, if no type check is needed then either the 3582 // dst type is exactly the expected type and the src type is a 3583 // subtype which we can't check or src is the same array as dst 3584 // but not necessarily exactly of type default_type. 3585 Label known_ok, halt; 3586 __ mov_metadata(tmp, default_type->constant_encoding()); 3587 #ifdef _LP64 3588 if (UseCompressedClassPointers) { 3589 __ encode_klass_not_null(tmp, rscratch1); 3590 } 3591 #endif 3592 3593 if (basic_type != T_OBJECT) { 3594 3595 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3596 else __ cmpptr(tmp, dst_klass_addr); 3597 __ jcc(Assembler::notEqual, halt); 3598 if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); 3599 else __ cmpptr(tmp, src_klass_addr); 3600 __ jcc(Assembler::equal, known_ok); 3601 } else { 3602 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3603 else __ cmpptr(tmp, dst_klass_addr); 3604 __ jcc(Assembler::equal, known_ok); 3605 __ cmpptr(src, dst); 3606 __ jcc(Assembler::equal, known_ok); 3607 } 3608 __ bind(halt); 3609 __ stop("incorrect type information in arraycopy"); 3610 __ bind(known_ok); 3611 } 3612 #endif 3613 3614 #ifndef PRODUCT 3615 if (PrintC1Statistics) { 3616 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1); 3617 } 3618 #endif 3619 3620 #ifdef _LP64 3621 assert_different_registers(c_rarg0, dst, dst_pos, length); 3622 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3623 assert_different_registers(c_rarg1, length); 3624 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3625 __ mov(c_rarg2, length); 3626 3627 #else 3628 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3629 store_parameter(tmp, 0); 3630 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3631 store_parameter(tmp, 1); 3632 store_parameter(length, 2); 3633 #endif // _LP64 3634 3635 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 3636 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 3637 const char *name; 3638 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 3639 __ call_VM_leaf(entry, 0); 3640 3641 if (stub != nullptr) { 3642 __ bind(*stub->continuation()); 3643 } 3644 } 3645 3646 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3647 assert(op->crc()->is_single_cpu(), "crc must be register"); 3648 assert(op->val()->is_single_cpu(), "byte value must be register"); 3649 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3650 Register crc = op->crc()->as_register(); 3651 Register val = op->val()->as_register(); 3652 Register res = op->result_opr()->as_register(); 3653 3654 assert_different_registers(val, crc, res); 3655 3656 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 3657 __ notl(crc); // ~crc 3658 __ update_byte_crc32(crc, val, res); 3659 __ 
notl(crc); // ~crc 3660 __ mov(res, crc); 3661 } 3662 3663 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 3664 Register obj = op->obj_opr()->as_register(); // may not be an oop 3665 Register hdr = op->hdr_opr()->as_register(); 3666 Register lock = op->lock_opr()->as_register(); 3667 if (LockingMode == LM_MONITOR) { 3668 if (op->info() != nullptr) { 3669 add_debug_info_for_null_check_here(op->info()); 3670 __ null_check(obj); 3671 } 3672 __ jmp(*op->stub()->entry()); 3673 } else if (op->code() == lir_lock) { 3674 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3675 Register tmp = LockingMode == LM_LIGHTWEIGHT ? op->scratch_opr()->as_register() : noreg; 3676 // add debug info for NullPointerException only if one is possible 3677 int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); 3678 if (op->info() != nullptr) { 3679 add_debug_info_for_null_check(null_check_offset, op->info()); 3680 } 3681 // done 3682 } else if (op->code() == lir_unlock) { 3683 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3684 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3685 } else { 3686 Unimplemented(); 3687 } 3688 __ bind(*op->stub()->continuation()); 3689 } 3690 3691 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 3692 Register obj = op->obj()->as_pointer_register(); 3693 Register result = op->result_opr()->as_pointer_register(); 3694 3695 CodeEmitInfo* info = op->info(); 3696 if (info != nullptr) { 3697 add_debug_info_for_null_check_here(info); 3698 } 3699 3700 #ifdef _LP64 3701 if (UseCompressedClassPointers) { 3702 __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes())); 3703 __ decode_klass_not_null(result, rscratch1); 3704 } else 3705 #endif 3706 __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes())); 3707 } 3708 3709 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3710 ciMethod* method = op->profiled_method(); 3711 int bci = op->profiled_bci(); 3712 ciMethod* callee = op->profiled_callee(); 3713 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3714 3715 // Update counter for all call types 3716 ciMethodData* md = method->method_data_or_null(); 3717 assert(md != nullptr, "Sanity"); 3718 ciProfileData* data = md->bci_to_data(bci); 3719 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 3720 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3721 Register mdo = op->mdo()->as_register(); 3722 __ mov_metadata(mdo, md->constant_encoding()); 3723 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3724 // Perform additional virtual call profiling for invokevirtual and 3725 // invokeinterface bytecodes 3726 if (op->should_profile_receiver_type()) { 3727 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3728 Register recv = op->recv()->as_register(); 3729 assert_different_registers(mdo, recv); 3730 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3731 ciKlass* known_klass = op->known_holder(); 3732 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 3733 // We know the type that will be seen at this call site; we can 3734 // statically update the MethodData* rather than needing to do 3735 // dynamic tests on the receiver type 3736 3737 // NOTE: we should probably put a lock around this search to 3738 // avoid collisions by concurrent compilations 3739 ciVirtualCallData* 
vc_data = (ciVirtualCallData*) data; 3740 uint i; 3741 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3742 ciKlass* receiver = vc_data->receiver(i); 3743 if (known_klass->equals(receiver)) { 3744 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3745 __ addptr(data_addr, DataLayout::counter_increment); 3746 return; 3747 } 3748 } 3749 3750 // Receiver type not found in profile data; select an empty slot 3751 3752 // Note that this is less efficient than it should be because it 3753 // always does a write to the receiver part of the 3754 // VirtualCallData rather than just the first time 3755 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3756 ciKlass* receiver = vc_data->receiver(i); 3757 if (receiver == nullptr) { 3758 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3759 __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1); 3760 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3761 __ addptr(data_addr, DataLayout::counter_increment); 3762 return; 3763 } 3764 } 3765 } else { 3766 __ load_klass(recv, recv, tmp_load_klass); 3767 Label update_done; 3768 type_profile_helper(mdo, md, data, recv, &update_done); 3769 // Receiver did not match any saved receiver and there is no empty row for it. 3770 // Increment total counter to indicate polymorphic case. 3771 __ addptr(counter_addr, DataLayout::counter_increment); 3772 3773 __ bind(update_done); 3774 } 3775 } else { 3776 // Static call 3777 __ addptr(counter_addr, DataLayout::counter_increment); 3778 } 3779 } 3780 3781 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3782 Register obj = op->obj()->as_register(); 3783 Register tmp = op->tmp()->as_pointer_register(); 3784 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3785 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3786 ciKlass* exact_klass = op->exact_klass(); 3787 intptr_t current_klass = op->current_klass(); 3788 bool not_null = op->not_null(); 3789 bool no_conflict = op->no_conflict(); 3790 3791 Label update, next, none; 3792 3793 bool do_null = !not_null; 3794 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3795 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3796 3797 assert(do_null || do_update, "why are we here?"); 3798 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3799 3800 __ verify_oop(obj); 3801 3802 #ifdef ASSERT 3803 if (obj == tmp) { 3804 #ifdef _LP64 3805 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index()); 3806 #else 3807 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index()); 3808 #endif 3809 } else { 3810 #ifdef _LP64 3811 assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index()); 3812 #else 3813 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index()); 3814 #endif 3815 } 3816 #endif 3817 if (do_null) { 3818 __ testptr(obj, obj); 3819 __ jccb(Assembler::notZero, update); 3820 if (!TypeEntries::was_null_seen(current_klass)) { 3821 __ testptr(mdo_addr, TypeEntries::null_seen); 3822 #ifndef ASSERT 3823 __ jccb(Assembler::notZero, next); // already set 3824 #else 3825 __ jcc(Assembler::notZero, next); // already set 3826 #endif 3827 // atomic update to prevent overwriting Klass* with 0 3828 __ lock(); 3829 __ orptr(mdo_addr, TypeEntries::null_seen); 3830 } 
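// obj is null at this point. If a klass update may still be needed for non-null
// objects, jump to 'next' (nothing more to record for a null). Under ASSERT, the
// statically not-null path below additionally stops on an unexpected null.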
3831 if (do_update) { 3832 #ifndef ASSERT 3833 __ jmpb(next); 3834 } 3835 #else 3836 __ jmp(next); 3837 } 3838 } else { 3839 __ testptr(obj, obj); 3840 __ jcc(Assembler::notZero, update); 3841 __ stop("unexpected null obj"); 3842 #endif 3843 } 3844 3845 __ bind(update); 3846 3847 if (do_update) { 3848 #ifdef ASSERT 3849 if (exact_klass != nullptr) { 3850 Label ok; 3851 __ load_klass(tmp, obj, tmp_load_klass); 3852 __ push(tmp); 3853 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3854 __ cmpptr(tmp, Address(rsp, 0)); 3855 __ jcc(Assembler::equal, ok); 3856 __ stop("exact klass and actual klass differ"); 3857 __ bind(ok); 3858 __ pop(tmp); 3859 } 3860 #endif 3861 if (!no_conflict) { 3862 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 3863 if (exact_klass != nullptr) { 3864 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3865 } else { 3866 __ load_klass(tmp, obj, tmp_load_klass); 3867 } 3868 #ifdef _LP64 3869 __ mov(rscratch1, tmp); // save original value before XOR 3870 #endif 3871 __ xorptr(tmp, mdo_addr); 3872 __ testptr(tmp, TypeEntries::type_klass_mask); 3873 // klass seen before, nothing to do. The unknown bit may have been 3874 // set already but no need to check. 3875 __ jccb(Assembler::zero, next); 3876 3877 __ testptr(tmp, TypeEntries::type_unknown); 3878 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3879 3880 if (TypeEntries::is_type_none(current_klass)) { 3881 __ testptr(mdo_addr, TypeEntries::type_mask); 3882 __ jccb(Assembler::zero, none); 3883 #ifdef _LP64 3884 // There is a chance that the checks above (re-reading profiling 3885 // data from memory) fail if another thread has just set the 3886 // profiling to this obj's klass 3887 __ mov(tmp, rscratch1); // get back original value before XOR 3888 __ xorptr(tmp, mdo_addr); 3889 __ testptr(tmp, TypeEntries::type_klass_mask); 3890 __ jccb(Assembler::zero, next); 3891 #endif 3892 } 3893 } else { 3894 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3895 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3896 3897 __ testptr(mdo_addr, TypeEntries::type_unknown); 3898 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3899 } 3900 3901 // different than before. Cannot keep accurate profile. 3902 __ orptr(mdo_addr, TypeEntries::type_unknown); 3903 3904 if (TypeEntries::is_type_none(current_klass)) { 3905 __ jmpb(next); 3906 3907 __ bind(none); 3908 // first time here. Set profile type. 
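// (At this point tmp holds klass XOR the old cell value, and the old cell could
// only have had the null_seen bit set, so storing tmp records the klass while
// preserving that flag.)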
3909 __ movptr(mdo_addr, tmp); 3910 #ifdef ASSERT 3911 __ andptr(tmp, TypeEntries::type_klass_mask); 3912 __ verify_klass_ptr(tmp); 3913 #endif 3914 } 3915 } else { 3916 // There's a single possible klass at this profile point 3917 assert(exact_klass != nullptr, "should be"); 3918 if (TypeEntries::is_type_none(current_klass)) { 3919 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3920 __ xorptr(tmp, mdo_addr); 3921 __ testptr(tmp, TypeEntries::type_klass_mask); 3922 #ifdef ASSERT 3923 __ jcc(Assembler::zero, next); 3924 3925 { 3926 Label ok; 3927 __ push(tmp); 3928 __ testptr(mdo_addr, TypeEntries::type_mask); 3929 __ jcc(Assembler::zero, ok); 3930 // may have been set by another thread 3931 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3932 __ xorptr(tmp, mdo_addr); 3933 __ testptr(tmp, TypeEntries::type_mask); 3934 __ jcc(Assembler::zero, ok); 3935 3936 __ stop("unexpected profiling mismatch"); 3937 __ bind(ok); 3938 __ pop(tmp); 3939 } 3940 #else 3941 __ jccb(Assembler::zero, next); 3942 #endif 3943 // first time here. Set profile type. 3944 __ movptr(mdo_addr, tmp); 3945 #ifdef ASSERT 3946 __ andptr(tmp, TypeEntries::type_klass_mask); 3947 __ verify_klass_ptr(tmp); 3948 #endif 3949 } else { 3950 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3951 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3952 3953 __ testptr(mdo_addr, TypeEntries::type_unknown); 3954 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3955 3956 __ orptr(mdo_addr, TypeEntries::type_unknown); 3957 } 3958 } 3959 } 3960 __ bind(next); 3961 } 3962 3963 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 3964 Register obj = op->obj()->as_register(); 3965 Register tmp = op->tmp()->as_pointer_register(); 3966 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3967 bool not_null = op->not_null(); 3968 int flag = op->flag(); 3969 3970 Label not_inline_type; 3971 if (!not_null) { 3972 __ testptr(obj, obj); 3973 __ jccb(Assembler::zero, not_inline_type); 3974 } 3975 3976 __ test_oop_is_not_inline_type(obj, tmp, not_inline_type); 3977 3978 __ orb(mdo_addr, flag); 3979 3980 __ bind(not_inline_type); 3981 } 3982 3983 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 3984 Unimplemented(); 3985 } 3986 3987 3988 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 3989 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 3990 } 3991 3992 3993 void LIR_Assembler::align_backward_branch_target() { 3994 __ align(BytesPerWord); 3995 } 3996 3997 3998 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 3999 if (left->is_single_cpu()) { 4000 __ negl(left->as_register()); 4001 move_regs(left->as_register(), dest->as_register()); 4002 4003 } else if (left->is_double_cpu()) { 4004 Register lo = left->as_register_lo(); 4005 #ifdef _LP64 4006 Register dst = dest->as_register_lo(); 4007 __ movptr(dst, lo); 4008 __ negptr(dst); 4009 #else 4010 Register hi = left->as_register_hi(); 4011 __ lneg(hi, lo); 4012 if (dest->as_register_lo() == hi) { 4013 assert(dest->as_register_hi() != lo, "destroying register"); 4014 move_regs(hi, dest->as_register_hi()); 4015 move_regs(lo, dest->as_register_lo()); 4016 } else { 4017 move_regs(lo, dest->as_register_lo()); 4018 move_regs(hi, dest->as_register_hi()); 4019 } 4020 #endif // _LP64 4021 4022 } else if (dest->is_single_xmm()) { 4023 #ifdef _LP64 4024 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 4025 assert(tmp->is_valid(), 
"need temporary"); 4026 assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 4027 __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2); 4028 } 4029 else 4030 #endif 4031 { 4032 assert(!tmp->is_valid(), "do not need temporary"); 4033 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { 4034 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); 4035 } 4036 __ xorps(dest->as_xmm_float_reg(), 4037 ExternalAddress((address)float_signflip_pool), 4038 rscratch1); 4039 } 4040 } else if (dest->is_double_xmm()) { 4041 #ifdef _LP64 4042 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 4043 assert(tmp->is_valid(), "need temporary"); 4044 assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg()); 4045 __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2); 4046 } 4047 else 4048 #endif 4049 { 4050 assert(!tmp->is_valid(), "do not need temporary"); 4051 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) { 4052 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); 4053 } 4054 __ xorpd(dest->as_xmm_double_reg(), 4055 ExternalAddress((address)double_signflip_pool), 4056 rscratch1); 4057 } 4058 #ifndef _LP64 4059 } else if (left->is_single_fpu() || left->is_double_fpu()) { 4060 assert(left->fpu() == 0, "arg must be on TOS"); 4061 assert(dest->fpu() == 0, "dest must be TOS"); 4062 __ fchs(); 4063 #endif // !_LP64 4064 4065 } else { 4066 ShouldNotReachHere(); 4067 } 4068 } 4069 4070 4071 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 4072 assert(src->is_address(), "must be an address"); 4073 assert(dest->is_register(), "must be a register"); 4074 4075 PatchingStub* patch = nullptr; 4076 if (patch_code != lir_patch_none) { 4077 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 4078 } 4079 4080 Register reg = dest->as_pointer_register(); 4081 LIR_Address* addr = src->as_address_ptr(); 4082 __ lea(reg, as_Address(addr)); 4083 4084 if (patch != nullptr) { 4085 patching_epilog(patch, patch_code, addr->base()->as_register(), info); 4086 } 4087 } 4088 4089 4090 4091 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 4092 assert(!tmp->is_valid(), "don't need temporary"); 4093 __ call(RuntimeAddress(dest)); 4094 if (info != nullptr) { 4095 add_call_info_here(info); 4096 } 4097 __ post_call_nop(); 4098 } 4099 4100 4101 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 4102 assert(type == T_LONG, "only for volatile long fields"); 4103 4104 if (info != nullptr) { 4105 add_debug_info_for_null_check_here(info); 4106 } 4107 4108 if (src->is_double_xmm()) { 4109 if (dest->is_double_cpu()) { 4110 #ifdef _LP64 4111 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); 4112 #else 4113 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); 4114 __ psrlq(src->as_xmm_double_reg(), 32); 4115 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); 4116 #endif // _LP64 4117 } else if (dest->is_double_stack()) { 4118 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); 4119 } else if (dest->is_address()) { 4120 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg()); 4121 } else { 4122 ShouldNotReachHere(); 4123 } 4124 4125 } else if (dest->is_double_xmm()) { 4126 if (src->is_double_stack()) { 4127 __ 
movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix())); 4128 } else if (src->is_address()) { 4129 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr())); 4130 } else { 4131 ShouldNotReachHere(); 4132 } 4133 4134 #ifndef _LP64 4135 } else if (src->is_double_fpu()) { 4136 assert(src->fpu_regnrLo() == 0, "must be TOS"); 4137 if (dest->is_double_stack()) { 4138 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix())); 4139 } else if (dest->is_address()) { 4140 __ fistp_d(as_Address(dest->as_address_ptr())); 4141 } else { 4142 ShouldNotReachHere(); 4143 } 4144 4145 } else if (dest->is_double_fpu()) { 4146 assert(dest->fpu_regnrLo() == 0, "must be TOS"); 4147 if (src->is_double_stack()) { 4148 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix())); 4149 } else if (src->is_address()) { 4150 __ fild_d(as_Address(src->as_address_ptr())); 4151 } else { 4152 ShouldNotReachHere(); 4153 } 4154 #endif // !_LP64 4155 4156 } else { 4157 ShouldNotReachHere(); 4158 } 4159 } 4160 4161 #ifdef ASSERT 4162 // emit run-time assertion 4163 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 4164 assert(op->code() == lir_assert, "must be"); 4165 4166 if (op->in_opr1()->is_valid()) { 4167 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 4168 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 4169 } else { 4170 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 4171 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 4172 } 4173 4174 Label ok; 4175 if (op->condition() != lir_cond_always) { 4176 Assembler::Condition acond = Assembler::zero; 4177 switch (op->condition()) { 4178 case lir_cond_equal: acond = Assembler::equal; break; 4179 case lir_cond_notEqual: acond = Assembler::notEqual; break; 4180 case lir_cond_less: acond = Assembler::less; break; 4181 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 4182 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 4183 case lir_cond_greater: acond = Assembler::greater; break; 4184 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 4185 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 4186 default: ShouldNotReachHere(); 4187 } 4188 __ jcc(acond, ok); 4189 } 4190 if (op->halt()) { 4191 const char* str = __ code_string(op->msg()); 4192 __ stop(str); 4193 } else { 4194 breakpoint(); 4195 } 4196 __ bind(ok); 4197 } 4198 #endif 4199 4200 void LIR_Assembler::membar() { 4201 // QQQ sparc TSO uses this, 4202 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4203 } 4204 4205 void LIR_Assembler::membar_acquire() { 4206 // No x86 machines currently require load fences 4207 } 4208 4209 void LIR_Assembler::membar_release() { 4210 // No x86 machines currently require store fences 4211 } 4212 4213 void LIR_Assembler::membar_loadload() { 4214 // no-op 4215 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 4216 } 4217 4218 void LIR_Assembler::membar_storestore() { 4219 // no-op 4220 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 4221 } 4222 4223 void LIR_Assembler::membar_loadstore() { 4224 // no-op 4225 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 4226 } 4227 4228 void LIR_Assembler::membar_storeload() { 4229 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4230 } 4231 4232 void LIR_Assembler::on_spin_wait() { 4233 __ pause (); 4234 } 4235 4236 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 4237 
assert(result_reg->is_register(), "check"); 4238 #ifdef _LP64 4239 // __ get_thread(result_reg->as_register_lo()); 4240 __ mov(result_reg->as_register(), r15_thread); 4241 #else 4242 __ get_thread(result_reg->as_register()); 4243 #endif // _LP64 4244 } 4245 4246 void LIR_Assembler::check_orig_pc() { 4247 __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD); 4248 } 4249 4250 void LIR_Assembler::peephole(LIR_List*) { 4251 // do nothing for now 4252 } 4253 4254 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 4255 assert(data == dest, "xchg/xadd uses only 2 operands"); 4256 4257 if (data->type() == T_INT) { 4258 if (code == lir_xadd) { 4259 __ lock(); 4260 __ xaddl(as_Address(src->as_address_ptr()), data->as_register()); 4261 } else { 4262 __ xchgl(data->as_register(), as_Address(src->as_address_ptr())); 4263 } 4264 } else if (data->is_oop()) { 4265 assert (code == lir_xchg, "xadd for oops"); 4266 Register obj = data->as_register(); 4267 #ifdef _LP64 4268 if (UseCompressedOops) { 4269 __ encode_heap_oop(obj); 4270 __ xchgl(obj, as_Address(src->as_address_ptr())); 4271 __ decode_heap_oop(obj); 4272 } else { 4273 __ xchgptr(obj, as_Address(src->as_address_ptr())); 4274 } 4275 #else 4276 __ xchgl(obj, as_Address(src->as_address_ptr())); 4277 #endif 4278 } else if (data->type() == T_LONG) { 4279 #ifdef _LP64 4280 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 4281 if (code == lir_xadd) { 4282 __ lock(); 4283 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo()); 4284 } else { 4285 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr())); 4286 } 4287 #else 4288 ShouldNotReachHere(); 4289 #endif 4290 } else { 4291 ShouldNotReachHere(); 4292 } 4293 } 4294 4295 #undef __