/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // of 128-bit operands for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));


NEEDS_CLEANUP // remove these definitions?
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

#ifndef _LP64
void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}
#endif // !_LP64

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot
  // in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a synchronized method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;
  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
  assert(thread != noreg, "x86_32 not implemented");

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    if (LockingMode == LM_MONITOR) {
      __ jmp(*stub->entry());
    } else {
      __ unlock_object(rdi, rsi, rax, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr(), rscratch1);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }
  if (InlineTypeReturnedAsFields) {
#ifndef _LP64
    Unimplemented();
#endif
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      __ test_oop_is_not_inline_type(rax, rscratch1, skip);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rdi, rax, rscratch1);
      __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
      // Unpack handler can be null if inline type is not scalarizable in returns
      __ testptr(rdi, rdi);
      __ jcc(Assembler::zero, skip);
      __ call(rdi);

      __ bind(skip);
    }
    // At this point, rax points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = rbx;
  __ get_thread(thread);
#endif
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}


int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  return (__ store_inline_type_fields_to_buf(vk, false));
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
#ifdef _LP64
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
#else
  assert(tmp->is_cpu_register(), "needed");
  const Register poll_addr = tmp->as_register();
  __ get_thread(poll_addr);
  __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
#endif
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject(), noreg);
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32bit so this doesn't produce useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

#ifndef _LP64
    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
#endif // !_LP64

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

#ifndef _LP64
    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

#ifndef _LP64
  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
#ifdef _LP64
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
#else
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_DOUBLE: {
#ifdef _LP64
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
#else
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

#ifndef _LP64
  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64 bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == nullptr && patch == nullptr, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif

    __ verify_oop(dest->as_register());
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


#ifdef _LP64
    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;
#else
    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");
      __ movptr(Address(rsp, 0), src->as_register_lo());
      __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
      }
      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != nullptr, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::fpu2long_stub_id)));
      }
      break;
#endif // _LP64

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    // init_state needs acquire, but x86 is TSO, and so we are already good.
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath || op->is_null_free() ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry(),
                      op->zero_array());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->need_null_check()) {
    __ testptr(obj, obj);
    if (op->should_profile()) {
      Label not_null;
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(*obj_is_null);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, obj, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv, &update_done);

      Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);

      __ bind(update_done);
    } else {
      __ jcc(Assembler::equal, *obj_is_null);
    }
1760 } 1761 1762 if (!k->is_loaded()) { 1763 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1764 } else { 1765 #ifdef _LP64 1766 __ mov_metadata(k_RInfo, k->constant_encoding()); 1767 #endif // _LP64 1768 } 1769 __ verify_oop(obj); 1770 1771 if (op->fast_check()) { 1772 // get object class 1773 // not a safepoint as obj null check happens earlier 1774 #ifdef _LP64 1775 if (UseCompressedClassPointers) { 1776 __ load_klass(Rtmp1, obj, tmp_load_klass); 1777 __ cmpptr(k_RInfo, Rtmp1); 1778 } else { 1779 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1780 } 1781 #else 1782 if (k->is_loaded()) { 1783 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1784 } else { 1785 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1786 } 1787 #endif 1788 __ jcc(Assembler::notEqual, *failure_target); 1789 // successful cast, fall through to profile or jump 1790 } else { 1791 // get object class 1792 // not a safepoint as obj null check happens earlier 1793 __ load_klass(klass_RInfo, obj, tmp_load_klass); 1794 if (k->is_loaded()) { 1795 // See if we get an immediate positive hit 1796 #ifdef _LP64 1797 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1798 #else 1799 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1800 #endif // _LP64 1801 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1802 __ jcc(Assembler::notEqual, *failure_target); 1803 // successful cast, fall through to profile or jump 1804 } else { 1805 // See if we get an immediate positive hit 1806 __ jcc(Assembler::equal, *success_target); 1807 // check for self 1808 #ifdef _LP64 1809 __ cmpptr(klass_RInfo, k_RInfo); 1810 #else 1811 __ cmpklass(klass_RInfo, k->constant_encoding()); 1812 #endif // _LP64 1813 __ jcc(Assembler::equal, *success_target); 1814 1815 __ push(klass_RInfo); 1816 #ifdef _LP64 1817 __ push(k_RInfo); 1818 #else 1819 __ pushklass(k->constant_encoding(), noreg); 1820 #endif // _LP64 1821 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1822 __ pop(klass_RInfo); 1823 __ pop(klass_RInfo); 1824 // result is a boolean 1825 __ testl(klass_RInfo, klass_RInfo); 1826 __ jcc(Assembler::equal, *failure_target); 1827 // successful cast, fall through to profile or jump 1828 } 1829 } else { 1830 // perform the fast part of the checking logic 1831 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1832 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1833 __ push(klass_RInfo); 1834 __ push(k_RInfo); 1835 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1836 __ pop(klass_RInfo); 1837 __ pop(k_RInfo); 1838 // result is a boolean 1839 __ testl(k_RInfo, k_RInfo); 1840 __ jcc(Assembler::equal, *failure_target); 1841 // successful cast, fall through to profile or jump 1842 } 1843 } 1844 __ jmp(*success); 1845 } 1846 1847 1848 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1849 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1850 LIR_Code code = op->code(); 1851 if (code == lir_store_check) { 1852 Register value = op->object()->as_register(); 1853 Register array = op->array()->as_register(); 1854 Register k_RInfo = op->tmp1()->as_register(); 1855 Register klass_RInfo = op->tmp2()->as_register(); 1856 Register Rtmp1 = op->tmp3()->as_register(); 1857 1858 CodeStub* stub = op->stub(); 1859 1860 // check if it needs to be 
profiled 1861 ciMethodData* md = nullptr; 1862 ciProfileData* data = nullptr; 1863 1864 if (op->should_profile()) { 1865 ciMethod* method = op->profiled_method(); 1866 assert(method != nullptr, "Should have method"); 1867 int bci = op->profiled_bci(); 1868 md = method->method_data_or_null(); 1869 assert(md != nullptr, "Sanity"); 1870 data = md->bci_to_data(bci); 1871 assert(data != nullptr, "need data for type check"); 1872 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1873 } 1874 Label done; 1875 Label* success_target = &done; 1876 Label* failure_target = stub->entry(); 1877 1878 __ testptr(value, value); 1879 if (op->should_profile()) { 1880 Label not_null; 1881 Register mdo = klass_RInfo; 1882 __ mov_metadata(mdo, md->constant_encoding()); 1883 __ jccb(Assembler::notEqual, not_null); 1884 // Object is null; update MDO and exit 1885 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1886 int header_bits = BitData::null_seen_byte_constant(); 1887 __ orb(data_addr, header_bits); 1888 __ jmp(done); 1889 __ bind(not_null); 1890 1891 Label update_done; 1892 Register recv = k_RInfo; 1893 __ load_klass(recv, value, tmp_load_klass); 1894 type_profile_helper(mdo, md, data, recv, &update_done); 1895 1896 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1897 __ addptr(counter_addr, DataLayout::counter_increment); 1898 __ bind(update_done); 1899 } else { 1900 __ jcc(Assembler::equal, done); 1901 } 1902 1903 add_debug_info_for_null_check_here(op->info_for_exception()); 1904 __ load_klass(k_RInfo, array, tmp_load_klass); 1905 __ load_klass(klass_RInfo, value, tmp_load_klass); 1906 1907 // get instance klass (it's already uncompressed) 1908 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1909 // perform the fast part of the checking logic 1910 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1911 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1912 __ push(klass_RInfo); 1913 __ push(k_RInfo); 1914 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1915 __ pop(klass_RInfo); 1916 __ pop(k_RInfo); 1917 // result is a boolean 1918 __ testl(k_RInfo, k_RInfo); 1919 __ jcc(Assembler::equal, *failure_target); 1920 // fall through to the success case 1921 1922 __ bind(done); 1923 } else 1924 if (code == lir_checkcast) { 1925 Register obj = op->object()->as_register(); 1926 Register dst = op->result_opr()->as_register(); 1927 Label success; 1928 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1929 __ bind(success); 1930 if (dst != obj) { 1931 __ mov(dst, obj); 1932 } 1933 } else 1934 if (code == lir_instanceof) { 1935 Register obj = op->object()->as_register(); 1936 Register dst = op->result_opr()->as_register(); 1937 Label success, failure, done; 1938 emit_typecheck_helper(op, &success, &failure, &failure); 1939 __ bind(failure); 1940 __ xorptr(dst, dst); 1941 __ jmpb(done); 1942 __ bind(success); 1943 __ movptr(dst, 1); 1944 __ bind(done); 1945 } else { 1946 ShouldNotReachHere(); 1947 } 1948 1949 } 1950 1951 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) { 1952 // We are loading/storing from/to an array that *may* be a flat array (the 1953 // declared type is Object[], abstract[], interface[] or VT.ref[]). 1954 // If this array is a flat array, take the slow path. 
__ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1956 if (!op->value()->is_illegal()) {
1957 // The array is not a flat array, but it might be null-free. If we are storing
1958 // a null into a null-free array, take the slow path (which will throw NPE).
1959 Label skip;
1960 __ cmpptr(op->value()->as_register(), NULL_WORD);
1961 __ jcc(Assembler::notEqual, skip);
1962 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1963 __ bind(skip);
1964 }
1965 }
1966
1967 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1968 // We are storing into an array that *may* be null-free (the declared type is
1969 // Object[], abstract[], interface[] or VT.ref[]).
1970 Label test_mark_word;
1971 Register tmp = op->tmp()->as_register();
1972 __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1973 __ testl(tmp, markWord::unlocked_value);
1974 __ jccb(Assembler::notZero, test_mark_word);
1975 __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
1976 __ bind(test_mark_word);
1977 __ testl(tmp, markWord::null_free_array_bit_in_place);
1978 }
1979
1980 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1981 Label L_oops_equal;
1982 Label L_oops_not_equal;
1983 Label L_end;
1984
1985 Register left = op->left()->as_register();
1986 Register right = op->right()->as_register();
1987
1988 __ cmpptr(left, right);
1989 __ jcc(Assembler::equal, L_oops_equal);
1990
1991 // (1) Null check -- if one of the operands is null, the other must not be null (because
1992 // the two references are not equal), so they are not substitutable.
1993 // FIXME: do null check only if the operand is nullable
1994 __ testptr(left, right);
1995 __ jcc(Assembler::zero, L_oops_not_equal);
1996
1997 ciKlass* left_klass = op->left_klass();
1998 ciKlass* right_klass = op->right_klass();
1999
2000 // (2) Inline type check -- if either of the operands is not an inline type,
2001 // they are not substitutable. We do this only if we are not sure that the
2002 // operands are inline types.
2003 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
2004 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2005 Register tmp1 = op->tmp1()->as_register();
2006 __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2007 __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2008 __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2009 __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2010 __ jcc(Assembler::notEqual, L_oops_not_equal);
2011 }
2012
2013 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2014 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
2015 // No need to load klass -- the operands are statically known to be the same inline klass.
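// The stub performs the field-by-field comparison itself and, as noted at the continuation
// point below, reports its result in RAX (0 iff the operands are not substitutable).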
2016 __ jmp(*op->stub()->entry()); 2017 } else { 2018 Register left_klass_op = op->left_klass_op()->as_register(); 2019 Register right_klass_op = op->right_klass_op()->as_register(); 2020 2021 if (UseCompressedClassPointers) { 2022 __ movl(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2023 __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2024 __ cmpl(left_klass_op, right_klass_op); 2025 } else { 2026 __ movptr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2027 __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2028 __ cmpptr(left_klass_op, right_klass_op); 2029 } 2030 2031 __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check 2032 // fall through to L_oops_not_equal 2033 } 2034 2035 __ bind(L_oops_not_equal); 2036 move(op->not_equal_result(), op->result_opr()); 2037 __ jmp(L_end); 2038 2039 __ bind(L_oops_equal); 2040 move(op->equal_result(), op->result_opr()); 2041 __ jmp(L_end); 2042 2043 // We've returned from the stub. RAX contains 0x0 IFF the two 2044 // operands are not substitutable. (Don't compare against 0x1 in case the 2045 // C compiler is naughty) 2046 __ bind(*op->stub()->continuation()); 2047 __ cmpl(rax, 0); 2048 __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal 2049 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal 2050 // fall-through 2051 __ bind(L_end); 2052 } 2053 2054 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2055 if (LP64_ONLY(false &&) op->code() == lir_cas_long) { 2056 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 2057 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 2058 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 2059 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 2060 Register addr = op->addr()->as_register(); 2061 __ lock(); 2062 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 2063 2064 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 2065 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 2066 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2067 Register newval = op->new_value()->as_register(); 2068 Register cmpval = op->cmp_value()->as_register(); 2069 assert(cmpval == rax, "wrong register"); 2070 assert(newval != noreg, "new val must be register"); 2071 assert(cmpval != newval, "cmp and new values must be in different registers"); 2072 assert(cmpval != addr, "cmp and addr must be in different registers"); 2073 assert(newval != addr, "new value and addr must be in different registers"); 2074 2075 if ( op->code() == lir_cas_obj) { 2076 #ifdef _LP64 2077 if (UseCompressedOops) { 2078 __ encode_heap_oop(cmpval); 2079 __ mov(rscratch1, newval); 2080 __ encode_heap_oop(rscratch1); 2081 __ lock(); 2082 // cmpval (rax) is implicitly used by this instruction 2083 __ cmpxchgl(rscratch1, Address(addr, 0)); 2084 } else 2085 #endif 2086 { 2087 __ lock(); 2088 __ cmpxchgptr(newval, Address(addr, 0)); 2089 } 2090 } else { 2091 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 2092 __ lock(); 2093 __ cmpxchgl(newval, Address(addr, 0)); 2094 } 2095 #ifdef _LP64 2096 } else if (op->code() == lir_cas_long) { 2097 Register addr = (op->addr()->is_single_cpu() ? 
op->addr()->as_register() : op->addr()->as_register_lo()); 2098 Register newval = op->new_value()->as_register_lo(); 2099 Register cmpval = op->cmp_value()->as_register_lo(); 2100 assert(cmpval == rax, "wrong register"); 2101 assert(newval != noreg, "new val must be register"); 2102 assert(cmpval != newval, "cmp and new values must be in different registers"); 2103 assert(cmpval != addr, "cmp and addr must be in different registers"); 2104 assert(newval != addr, "new value and addr must be in different registers"); 2105 __ lock(); 2106 __ cmpxchgq(newval, Address(addr, 0)); 2107 #endif // _LP64 2108 } else { 2109 Unimplemented(); 2110 } 2111 } 2112 2113 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) { 2114 assert(dst->is_cpu_register(), "must be"); 2115 assert(dst->type() == src->type(), "must be"); 2116 2117 if (src->is_cpu_register()) { 2118 reg2reg(src, dst); 2119 } else if (src->is_stack()) { 2120 stack2reg(src, dst, dst->type()); 2121 } else if (src->is_constant()) { 2122 const2reg(src, dst, lir_patch_none, nullptr); 2123 } else { 2124 ShouldNotReachHere(); 2125 } 2126 } 2127 2128 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 2129 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 2130 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86"); 2131 2132 Assembler::Condition acond, ncond; 2133 switch (condition) { 2134 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 2135 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 2136 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 2137 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 2138 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 2139 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 2140 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 2141 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 2142 default: acond = Assembler::equal; ncond = Assembler::notEqual; 2143 ShouldNotReachHere(); 2144 } 2145 2146 if (opr1->is_cpu_register()) { 2147 reg2reg(opr1, result); 2148 } else if (opr1->is_stack()) { 2149 stack2reg(opr1, result, result->type()); 2150 } else if (opr1->is_constant()) { 2151 const2reg(opr1, result, lir_patch_none, nullptr); 2152 } else { 2153 ShouldNotReachHere(); 2154 } 2155 2156 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 2157 // optimized version that does not require a branch 2158 if (opr2->is_single_cpu()) { 2159 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 2160 __ cmov(ncond, result->as_register(), opr2->as_register()); 2161 } else if (opr2->is_double_cpu()) { 2162 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2163 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2164 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 2165 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 2166 } else if (opr2->is_single_stack()) { 2167 __ cmovl(ncond, result->as_register(), 
frame_map()->address_for_slot(opr2->single_stack_ix())); 2168 } else if (opr2->is_double_stack()) { 2169 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 2170 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 2171 } else { 2172 ShouldNotReachHere(); 2173 } 2174 2175 } else { 2176 Label skip; 2177 __ jccb(acond, skip); 2178 if (opr2->is_cpu_register()) { 2179 reg2reg(opr2, result); 2180 } else if (opr2->is_stack()) { 2181 stack2reg(opr2, result, result->type()); 2182 } else if (opr2->is_constant()) { 2183 const2reg(opr2, result, lir_patch_none, nullptr); 2184 } else { 2185 ShouldNotReachHere(); 2186 } 2187 __ bind(skip); 2188 } 2189 } 2190 2191 2192 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 2193 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 2194 2195 if (left->is_single_cpu()) { 2196 assert(left == dest, "left and dest must be equal"); 2197 Register lreg = left->as_register(); 2198 2199 if (right->is_single_cpu()) { 2200 // cpu register - cpu register 2201 Register rreg = right->as_register(); 2202 switch (code) { 2203 case lir_add: __ addl (lreg, rreg); break; 2204 case lir_sub: __ subl (lreg, rreg); break; 2205 case lir_mul: __ imull(lreg, rreg); break; 2206 default: ShouldNotReachHere(); 2207 } 2208 2209 } else if (right->is_stack()) { 2210 // cpu register - stack 2211 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2212 switch (code) { 2213 case lir_add: __ addl(lreg, raddr); break; 2214 case lir_sub: __ subl(lreg, raddr); break; 2215 default: ShouldNotReachHere(); 2216 } 2217 2218 } else if (right->is_constant()) { 2219 // cpu register - constant 2220 jint c = right->as_constant_ptr()->as_jint(); 2221 switch (code) { 2222 case lir_add: { 2223 __ incrementl(lreg, c); 2224 break; 2225 } 2226 case lir_sub: { 2227 __ decrementl(lreg, c); 2228 break; 2229 } 2230 default: ShouldNotReachHere(); 2231 } 2232 2233 } else { 2234 ShouldNotReachHere(); 2235 } 2236 2237 } else if (left->is_double_cpu()) { 2238 assert(left == dest, "left and dest must be equal"); 2239 Register lreg_lo = left->as_register_lo(); 2240 Register lreg_hi = left->as_register_hi(); 2241 2242 if (right->is_double_cpu()) { 2243 // cpu register - cpu register 2244 Register rreg_lo = right->as_register_lo(); 2245 Register rreg_hi = right->as_register_hi(); 2246 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 2247 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 2248 switch (code) { 2249 case lir_add: 2250 __ addptr(lreg_lo, rreg_lo); 2251 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); 2252 break; 2253 case lir_sub: 2254 __ subptr(lreg_lo, rreg_lo); 2255 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 2256 break; 2257 case lir_mul: 2258 #ifdef _LP64 2259 __ imulq(lreg_lo, rreg_lo); 2260 #else 2261 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 2262 __ imull(lreg_hi, rreg_lo); 2263 __ imull(rreg_hi, lreg_lo); 2264 __ addl (rreg_hi, lreg_hi); 2265 __ mull (rreg_lo); 2266 __ addl (lreg_hi, rreg_hi); 2267 #endif // _LP64 2268 break; 2269 default: 2270 ShouldNotReachHere(); 2271 } 2272 2273 } else if (right->is_constant()) { 2274 // cpu register - constant 2275 #ifdef _LP64 2276 jlong c = right->as_constant_ptr()->as_jlong_bits(); 2277 __ movptr(r10, (intptr_t) c); 2278 switch (code) { 2279 
case lir_add: 2280 __ addptr(lreg_lo, r10); 2281 break; 2282 case lir_sub: 2283 __ subptr(lreg_lo, r10); 2284 break; 2285 default: 2286 ShouldNotReachHere(); 2287 } 2288 #else 2289 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 2290 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 2291 switch (code) { 2292 case lir_add: 2293 __ addptr(lreg_lo, c_lo); 2294 __ adcl(lreg_hi, c_hi); 2295 break; 2296 case lir_sub: 2297 __ subptr(lreg_lo, c_lo); 2298 __ sbbl(lreg_hi, c_hi); 2299 break; 2300 default: 2301 ShouldNotReachHere(); 2302 } 2303 #endif // _LP64 2304 2305 } else { 2306 ShouldNotReachHere(); 2307 } 2308 2309 } else if (left->is_single_xmm()) { 2310 assert(left == dest, "left and dest must be equal"); 2311 XMMRegister lreg = left->as_xmm_float_reg(); 2312 2313 if (right->is_single_xmm()) { 2314 XMMRegister rreg = right->as_xmm_float_reg(); 2315 switch (code) { 2316 case lir_add: __ addss(lreg, rreg); break; 2317 case lir_sub: __ subss(lreg, rreg); break; 2318 case lir_mul: __ mulss(lreg, rreg); break; 2319 case lir_div: __ divss(lreg, rreg); break; 2320 default: ShouldNotReachHere(); 2321 } 2322 } else { 2323 Address raddr; 2324 if (right->is_single_stack()) { 2325 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2326 } else if (right->is_constant()) { 2327 // hack for now 2328 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 2329 } else { 2330 ShouldNotReachHere(); 2331 } 2332 switch (code) { 2333 case lir_add: __ addss(lreg, raddr); break; 2334 case lir_sub: __ subss(lreg, raddr); break; 2335 case lir_mul: __ mulss(lreg, raddr); break; 2336 case lir_div: __ divss(lreg, raddr); break; 2337 default: ShouldNotReachHere(); 2338 } 2339 } 2340 2341 } else if (left->is_double_xmm()) { 2342 assert(left == dest, "left and dest must be equal"); 2343 2344 XMMRegister lreg = left->as_xmm_double_reg(); 2345 if (right->is_double_xmm()) { 2346 XMMRegister rreg = right->as_xmm_double_reg(); 2347 switch (code) { 2348 case lir_add: __ addsd(lreg, rreg); break; 2349 case lir_sub: __ subsd(lreg, rreg); break; 2350 case lir_mul: __ mulsd(lreg, rreg); break; 2351 case lir_div: __ divsd(lreg, rreg); break; 2352 default: ShouldNotReachHere(); 2353 } 2354 } else { 2355 Address raddr; 2356 if (right->is_double_stack()) { 2357 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2358 } else if (right->is_constant()) { 2359 // hack for now 2360 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2361 } else { 2362 ShouldNotReachHere(); 2363 } 2364 switch (code) { 2365 case lir_add: __ addsd(lreg, raddr); break; 2366 case lir_sub: __ subsd(lreg, raddr); break; 2367 case lir_mul: __ mulsd(lreg, raddr); break; 2368 case lir_div: __ divsd(lreg, raddr); break; 2369 default: ShouldNotReachHere(); 2370 } 2371 } 2372 2373 #ifndef _LP64 2374 } else if (left->is_single_fpu()) { 2375 assert(dest->is_single_fpu(), "fpu stack allocation required"); 2376 2377 if (right->is_single_fpu()) { 2378 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); 2379 2380 } else { 2381 assert(left->fpu_regnr() == 0, "left must be on TOS"); 2382 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); 2383 2384 Address raddr; 2385 if (right->is_single_stack()) { 2386 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2387 } else if (right->is_constant()) { 2388 address const_addr = float_constant(right->as_jfloat()); 2389 assert(const_addr != nullptr, "incorrect float/double constant 
maintenance"); 2390 // hack for now 2391 raddr = __ as_Address(InternalAddress(const_addr)); 2392 } else { 2393 ShouldNotReachHere(); 2394 } 2395 2396 switch (code) { 2397 case lir_add: __ fadd_s(raddr); break; 2398 case lir_sub: __ fsub_s(raddr); break; 2399 case lir_mul: __ fmul_s(raddr); break; 2400 case lir_div: __ fdiv_s(raddr); break; 2401 default: ShouldNotReachHere(); 2402 } 2403 } 2404 2405 } else if (left->is_double_fpu()) { 2406 assert(dest->is_double_fpu(), "fpu stack allocation required"); 2407 2408 if (code == lir_mul || code == lir_div) { 2409 // Double values require special handling for strictfp mul/div on x86 2410 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1())); 2411 __ fmulp(left->fpu_regnrLo() + 1); 2412 } 2413 2414 if (right->is_double_fpu()) { 2415 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); 2416 2417 } else { 2418 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); 2419 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); 2420 2421 Address raddr; 2422 if (right->is_double_stack()) { 2423 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2424 } else if (right->is_constant()) { 2425 // hack for now 2426 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2427 } else { 2428 ShouldNotReachHere(); 2429 } 2430 2431 switch (code) { 2432 case lir_add: __ fadd_d(raddr); break; 2433 case lir_sub: __ fsub_d(raddr); break; 2434 case lir_mul: __ fmul_d(raddr); break; 2435 case lir_div: __ fdiv_d(raddr); break; 2436 default: ShouldNotReachHere(); 2437 } 2438 } 2439 2440 if (code == lir_mul || code == lir_div) { 2441 // Double values require special handling for strictfp mul/div on x86 2442 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2())); 2443 __ fmulp(dest->fpu_regnrLo() + 1); 2444 } 2445 #endif // !_LP64 2446 2447 } else if (left->is_single_stack() || left->is_address()) { 2448 assert(left == dest, "left and dest must be equal"); 2449 2450 Address laddr; 2451 if (left->is_single_stack()) { 2452 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2453 } else if (left->is_address()) { 2454 laddr = as_Address(left->as_address_ptr()); 2455 } else { 2456 ShouldNotReachHere(); 2457 } 2458 2459 if (right->is_single_cpu()) { 2460 Register rreg = right->as_register(); 2461 switch (code) { 2462 case lir_add: __ addl(laddr, rreg); break; 2463 case lir_sub: __ subl(laddr, rreg); break; 2464 default: ShouldNotReachHere(); 2465 } 2466 } else if (right->is_constant()) { 2467 jint c = right->as_constant_ptr()->as_jint(); 2468 switch (code) { 2469 case lir_add: { 2470 __ incrementl(laddr, c); 2471 break; 2472 } 2473 case lir_sub: { 2474 __ decrementl(laddr, c); 2475 break; 2476 } 2477 default: ShouldNotReachHere(); 2478 } 2479 } else { 2480 ShouldNotReachHere(); 2481 } 2482 2483 } else { 2484 ShouldNotReachHere(); 2485 } 2486 } 2487 2488 #ifndef _LP64 2489 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { 2490 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); 2491 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); 2492 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); 2493 2494 bool left_is_tos = (left_index == 0); 2495 bool dest_is_tos = (dest_index == 0); 2496 int non_tos_index = (left_is_tos ? 
right_index : left_index); 2497 2498 switch (code) { 2499 case lir_add: 2500 if (pop_fpu_stack) __ faddp(non_tos_index); 2501 else if (dest_is_tos) __ fadd (non_tos_index); 2502 else __ fadda(non_tos_index); 2503 break; 2504 2505 case lir_sub: 2506 if (left_is_tos) { 2507 if (pop_fpu_stack) __ fsubrp(non_tos_index); 2508 else if (dest_is_tos) __ fsub (non_tos_index); 2509 else __ fsubra(non_tos_index); 2510 } else { 2511 if (pop_fpu_stack) __ fsubp (non_tos_index); 2512 else if (dest_is_tos) __ fsubr (non_tos_index); 2513 else __ fsuba (non_tos_index); 2514 } 2515 break; 2516 2517 case lir_mul: 2518 if (pop_fpu_stack) __ fmulp(non_tos_index); 2519 else if (dest_is_tos) __ fmul (non_tos_index); 2520 else __ fmula(non_tos_index); 2521 break; 2522 2523 case lir_div: 2524 if (left_is_tos) { 2525 if (pop_fpu_stack) __ fdivrp(non_tos_index); 2526 else if (dest_is_tos) __ fdiv (non_tos_index); 2527 else __ fdivra(non_tos_index); 2528 } else { 2529 if (pop_fpu_stack) __ fdivp (non_tos_index); 2530 else if (dest_is_tos) __ fdivr (non_tos_index); 2531 else __ fdiva (non_tos_index); 2532 } 2533 break; 2534 2535 case lir_rem: 2536 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); 2537 __ fremr(noreg); 2538 break; 2539 2540 default: 2541 ShouldNotReachHere(); 2542 } 2543 } 2544 #endif // _LP64 2545 2546 2547 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) { 2548 if (value->is_double_xmm()) { 2549 switch(code) { 2550 case lir_abs : 2551 { 2552 #ifdef _LP64 2553 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 2554 assert(tmp->is_valid(), "need temporary"); 2555 __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2); 2556 } else 2557 #endif 2558 { 2559 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2560 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2561 } 2562 assert(!tmp->is_valid(), "do not need temporary"); 2563 __ andpd(dest->as_xmm_double_reg(), 2564 ExternalAddress((address)double_signmask_pool), 2565 rscratch1); 2566 } 2567 } 2568 break; 2569 2570 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2571 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2572 default : ShouldNotReachHere(); 2573 } 2574 2575 #ifndef _LP64 2576 } else if (value->is_double_fpu()) { 2577 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); 2578 switch(code) { 2579 case lir_abs : __ fabs() ; break; 2580 case lir_sqrt : __ fsqrt(); break; 2581 default : ShouldNotReachHere(); 2582 } 2583 #endif // !_LP64 2584 } else if (code == lir_f2hf) { 2585 __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 2586 } else if (code == lir_hf2f) { 2587 __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register()); 2588 } else { 2589 Unimplemented(); 2590 } 2591 } 2592 2593 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2594 // assert(left->destroys_register(), "check"); 2595 if (left->is_single_cpu()) { 2596 Register reg = left->as_register(); 2597 if (right->is_constant()) { 2598 int val = right->as_constant_ptr()->as_jint(); 2599 switch (code) { 2600 case lir_logic_and: __ andl (reg, val); break; 2601 case lir_logic_or: __ orl (reg, val); break; 2602 case lir_logic_xor: __ xorl (reg, val); break; 2603 default: ShouldNotReachHere(); 2604 } 2605 } else if (right->is_stack()) 
{ 2606 // added support for stack operands 2607 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2608 switch (code) { 2609 case lir_logic_and: __ andl (reg, raddr); break; 2610 case lir_logic_or: __ orl (reg, raddr); break; 2611 case lir_logic_xor: __ xorl (reg, raddr); break; 2612 default: ShouldNotReachHere(); 2613 } 2614 } else { 2615 Register rright = right->as_register(); 2616 switch (code) { 2617 case lir_logic_and: __ andptr (reg, rright); break; 2618 case lir_logic_or : __ orptr (reg, rright); break; 2619 case lir_logic_xor: __ xorptr (reg, rright); break; 2620 default: ShouldNotReachHere(); 2621 } 2622 } 2623 move_regs(reg, dst->as_register()); 2624 } else { 2625 Register l_lo = left->as_register_lo(); 2626 Register l_hi = left->as_register_hi(); 2627 if (right->is_constant()) { 2628 #ifdef _LP64 2629 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2630 switch (code) { 2631 case lir_logic_and: 2632 __ andq(l_lo, rscratch1); 2633 break; 2634 case lir_logic_or: 2635 __ orq(l_lo, rscratch1); 2636 break; 2637 case lir_logic_xor: 2638 __ xorq(l_lo, rscratch1); 2639 break; 2640 default: ShouldNotReachHere(); 2641 } 2642 #else 2643 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2644 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2645 switch (code) { 2646 case lir_logic_and: 2647 __ andl(l_lo, r_lo); 2648 __ andl(l_hi, r_hi); 2649 break; 2650 case lir_logic_or: 2651 __ orl(l_lo, r_lo); 2652 __ orl(l_hi, r_hi); 2653 break; 2654 case lir_logic_xor: 2655 __ xorl(l_lo, r_lo); 2656 __ xorl(l_hi, r_hi); 2657 break; 2658 default: ShouldNotReachHere(); 2659 } 2660 #endif // _LP64 2661 } else { 2662 #ifdef _LP64 2663 Register r_lo; 2664 if (is_reference_type(right->type())) { 2665 r_lo = right->as_register(); 2666 } else { 2667 r_lo = right->as_register_lo(); 2668 } 2669 #else 2670 Register r_lo = right->as_register_lo(); 2671 Register r_hi = right->as_register_hi(); 2672 assert(l_lo != r_hi, "overwriting registers"); 2673 #endif 2674 switch (code) { 2675 case lir_logic_and: 2676 __ andptr(l_lo, r_lo); 2677 NOT_LP64(__ andptr(l_hi, r_hi);) 2678 break; 2679 case lir_logic_or: 2680 __ orptr(l_lo, r_lo); 2681 NOT_LP64(__ orptr(l_hi, r_hi);) 2682 break; 2683 case lir_logic_xor: 2684 __ xorptr(l_lo, r_lo); 2685 NOT_LP64(__ xorptr(l_hi, r_hi);) 2686 break; 2687 default: ShouldNotReachHere(); 2688 } 2689 } 2690 2691 Register dst_lo = dst->as_register_lo(); 2692 Register dst_hi = dst->as_register_hi(); 2693 2694 #ifdef _LP64 2695 move_regs(l_lo, dst_lo); 2696 #else 2697 if (dst_lo == l_hi) { 2698 assert(dst_hi != l_lo, "overwriting registers"); 2699 move_regs(l_hi, dst_hi); 2700 move_regs(l_lo, dst_lo); 2701 } else { 2702 assert(dst_lo != l_hi, "overwriting registers"); 2703 move_regs(l_lo, dst_lo); 2704 move_regs(l_hi, dst_hi); 2705 } 2706 #endif // _LP64 2707 } 2708 } 2709 2710 2711 // we assume that rax, and rdx can be overwritten 2712 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2713 2714 assert(left->is_single_cpu(), "left must be register"); 2715 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2716 assert(result->is_single_cpu(), "result must be register"); 2717 2718 // assert(left->destroys_register(), "check"); 2719 // assert(right->destroys_register(), "check"); 2720 2721 Register lreg = left->as_register(); 2722 Register dreg = result->as_register(); 2723 2724 if (right->is_constant()) { 2725 jint divisor = 
right->as_constant_ptr()->as_jint(); 2726 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2727 if (code == lir_idiv) { 2728 assert(lreg == rax, "must be rax,"); 2729 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2730 __ cdql(); // sign extend into rdx:rax 2731 if (divisor == 2) { 2732 __ subl(lreg, rdx); 2733 } else { 2734 __ andl(rdx, divisor - 1); 2735 __ addl(lreg, rdx); 2736 } 2737 __ sarl(lreg, log2i_exact(divisor)); 2738 move_regs(lreg, dreg); 2739 } else if (code == lir_irem) { 2740 Label done; 2741 __ mov(dreg, lreg); 2742 __ andl(dreg, 0x80000000 | (divisor - 1)); 2743 __ jcc(Assembler::positive, done); 2744 __ decrement(dreg); 2745 __ orl(dreg, ~(divisor - 1)); 2746 __ increment(dreg); 2747 __ bind(done); 2748 } else { 2749 ShouldNotReachHere(); 2750 } 2751 } else { 2752 Register rreg = right->as_register(); 2753 assert(lreg == rax, "left register must be rax,"); 2754 assert(rreg != rdx, "right register must not be rdx"); 2755 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2756 2757 move_regs(lreg, rax); 2758 2759 int idivl_offset = __ corrected_idivl(rreg); 2760 if (ImplicitDiv0Checks) { 2761 add_debug_info_for_div0(idivl_offset, info); 2762 } 2763 if (code == lir_irem) { 2764 move_regs(rdx, dreg); // result is in rdx 2765 } else { 2766 move_regs(rax, dreg); 2767 } 2768 } 2769 } 2770 2771 2772 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2773 if (opr1->is_single_cpu()) { 2774 Register reg1 = opr1->as_register(); 2775 if (opr2->is_single_cpu()) { 2776 // cpu register - cpu register 2777 if (is_reference_type(opr1->type())) { 2778 __ cmpoop(reg1, opr2->as_register()); 2779 } else { 2780 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 2781 __ cmpl(reg1, opr2->as_register()); 2782 } 2783 } else if (opr2->is_stack()) { 2784 // cpu register - stack 2785 if (is_reference_type(opr1->type())) { 2786 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2787 } else { 2788 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2789 } 2790 } else if (opr2->is_constant()) { 2791 // cpu register - constant 2792 LIR_Const* c = opr2->as_constant_ptr(); 2793 if (c->type() == T_INT) { 2794 jint i = c->as_jint(); 2795 if (i == 0) { 2796 __ testl(reg1, reg1); 2797 } else { 2798 __ cmpl(reg1, i); 2799 } 2800 } else if (c->type() == T_METADATA) { 2801 // All we need for now is a comparison with null for equality. 
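// Only the null case needs to be handled here; a non-null Metadata* constant would have to be
// materialized first, and no current user requires that (hence the ShouldNotReachHere below).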
2802 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 2803 Metadata* m = c->as_metadata(); 2804 if (m == nullptr) { 2805 __ testptr(reg1, reg1); 2806 } else { 2807 ShouldNotReachHere(); 2808 } 2809 } else if (is_reference_type(c->type())) { 2810 // In 64bit oops are single register 2811 jobject o = c->as_jobject(); 2812 if (o == nullptr) { 2813 __ testptr(reg1, reg1); 2814 } else { 2815 __ cmpoop(reg1, o, rscratch1); 2816 } 2817 } else { 2818 fatal("unexpected type: %s", basictype_to_str(c->type())); 2819 } 2820 // cpu register - address 2821 } else if (opr2->is_address()) { 2822 if (op->info() != nullptr) { 2823 add_debug_info_for_null_check_here(op->info()); 2824 } 2825 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2826 } else { 2827 ShouldNotReachHere(); 2828 } 2829 2830 } else if(opr1->is_double_cpu()) { 2831 Register xlo = opr1->as_register_lo(); 2832 Register xhi = opr1->as_register_hi(); 2833 if (opr2->is_double_cpu()) { 2834 #ifdef _LP64 2835 __ cmpptr(xlo, opr2->as_register_lo()); 2836 #else 2837 // cpu register - cpu register 2838 Register ylo = opr2->as_register_lo(); 2839 Register yhi = opr2->as_register_hi(); 2840 __ subl(xlo, ylo); 2841 __ sbbl(xhi, yhi); 2842 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2843 __ orl(xhi, xlo); 2844 } 2845 #endif // _LP64 2846 } else if (opr2->is_constant()) { 2847 // cpu register - constant 0 2848 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2849 #ifdef _LP64 2850 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2851 #else 2852 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2853 __ orl(xhi, xlo); 2854 #endif // _LP64 2855 } else { 2856 ShouldNotReachHere(); 2857 } 2858 2859 } else if (opr1->is_single_xmm()) { 2860 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2861 if (opr2->is_single_xmm()) { 2862 // xmm register - xmm register 2863 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2864 } else if (opr2->is_stack()) { 2865 // xmm register - stack 2866 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2867 } else if (opr2->is_constant()) { 2868 // xmm register - constant 2869 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2870 } else if (opr2->is_address()) { 2871 // xmm register - address 2872 if (op->info() != nullptr) { 2873 add_debug_info_for_null_check_here(op->info()); 2874 } 2875 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2876 } else { 2877 ShouldNotReachHere(); 2878 } 2879 2880 } else if (opr1->is_double_xmm()) { 2881 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2882 if (opr2->is_double_xmm()) { 2883 // xmm register - xmm register 2884 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2885 } else if (opr2->is_stack()) { 2886 // xmm register - stack 2887 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2888 } else if (opr2->is_constant()) { 2889 // xmm register - constant 2890 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2891 } else if (opr2->is_address()) { 2892 // xmm register - address 2893 if (op->info() != nullptr) { 2894 add_debug_info_for_null_check_here(op->info()); 2895 } 2896 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2897 } else { 2898 ShouldNotReachHere(); 2899 } 2900 2901 #ifndef _LP64 2902 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { 2903 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)"); 2904 
assert(opr2->is_fpu_register(), "both must be registers"); 2905 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2906 #endif // LP64 2907 2908 } else if (opr1->is_address() && opr2->is_constant()) { 2909 LIR_Const* c = opr2->as_constant_ptr(); 2910 #ifdef _LP64 2911 if (is_reference_type(c->type())) { 2912 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2913 __ movoop(rscratch1, c->as_jobject()); 2914 } 2915 #endif // LP64 2916 if (op->info() != nullptr) { 2917 add_debug_info_for_null_check_here(op->info()); 2918 } 2919 // special case: address - constant 2920 LIR_Address* addr = opr1->as_address_ptr(); 2921 if (c->type() == T_INT) { 2922 __ cmpl(as_Address(addr), c->as_jint()); 2923 } else if (is_reference_type(c->type())) { 2924 #ifdef _LP64 2925 // %%% Make this explode if addr isn't reachable until we figure out a 2926 // better strategy by giving noreg as the temp for as_Address 2927 __ cmpoop(rscratch1, as_Address(addr, noreg)); 2928 #else 2929 __ cmpoop(as_Address(addr), c->as_jobject()); 2930 #endif // _LP64 2931 } else { 2932 ShouldNotReachHere(); 2933 } 2934 2935 } else { 2936 ShouldNotReachHere(); 2937 } 2938 } 2939 2940 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2941 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2942 if (left->is_single_xmm()) { 2943 assert(right->is_single_xmm(), "must match"); 2944 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2945 } else if (left->is_double_xmm()) { 2946 assert(right->is_double_xmm(), "must match"); 2947 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2948 2949 } else { 2950 #ifdef _LP64 2951 ShouldNotReachHere(); 2952 #else 2953 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); 2954 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); 2955 2956 assert(left->fpu() == 0, "left must be on TOS"); 2957 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), 2958 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2959 #endif // LP64 2960 } 2961 } else { 2962 assert(code == lir_cmp_l2i, "check"); 2963 #ifdef _LP64 2964 Label done; 2965 Register dest = dst->as_register(); 2966 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 2967 __ movl(dest, -1); 2968 __ jccb(Assembler::less, done); 2969 __ setb(Assembler::notZero, dest); 2970 __ movzbl(dest, dest); 2971 __ bind(done); 2972 #else 2973 __ lcmp2int(left->as_register_hi(), 2974 left->as_register_lo(), 2975 right->as_register_hi(), 2976 right->as_register_lo()); 2977 move_regs(left->as_register_hi(), dst->as_register()); 2978 #endif // _LP64 2979 } 2980 } 2981 2982 2983 void LIR_Assembler::align_call(LIR_Code code) { 2984 // make sure that the displacement word of the call ends up word aligned 2985 int offset = __ offset(); 2986 switch (code) { 2987 case lir_static_call: 2988 case lir_optvirtual_call: 2989 case lir_dynamic_call: 2990 offset += NativeCall::displacement_offset; 2991 break; 2992 case lir_icvirtual_call: 2993 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex; 2994 break; 2995 default: ShouldNotReachHere(); 2996 } 2997 __ align(BytesPerWord, offset); 2998 } 2999 3000 3001 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 3002 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 3003 "must 
be aligned"); 3004 __ call(AddressLiteral(op->addr(), rtype)); 3005 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3006 __ post_call_nop(); 3007 } 3008 3009 3010 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 3011 __ ic_call(op->addr()); 3012 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3013 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0, 3014 "must be aligned"); 3015 __ post_call_nop(); 3016 } 3017 3018 3019 void LIR_Assembler::emit_static_call_stub() { 3020 address call_pc = __ pc(); 3021 address stub = __ start_a_stub(call_stub_size()); 3022 if (stub == nullptr) { 3023 bailout("static call stub overflow"); 3024 return; 3025 } 3026 3027 int start = __ offset(); 3028 3029 // make sure that the displacement word of the call ends up word aligned 3030 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset); 3031 __ relocate(static_stub_Relocation::spec(call_pc)); 3032 __ mov_metadata(rbx, (Metadata*)nullptr); 3033 // must be set to -1 at code generation time 3034 assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned"); 3035 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 3036 __ jump(RuntimeAddress(__ pc())); 3037 3038 assert(__ offset() - start <= call_stub_size(), "stub too big"); 3039 __ end_a_stub(); 3040 } 3041 3042 3043 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 3044 assert(exceptionOop->as_register() == rax, "must match"); 3045 assert(exceptionPC->as_register() == rdx, "must match"); 3046 3047 // exception object is not added to oop map by LinearScan 3048 // (LinearScan assumes that no oops are in fixed registers) 3049 info->add_register_oop(exceptionOop); 3050 C1StubId unwind_id; 3051 3052 // get current pc information 3053 // pc is only needed if the method has an exception handler, the unwind code does not need it. 
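// The lea below captures the current pc into exceptionPC (rdx), and add_call_info records the
// debug info at that offset so the runtime can find the exception handler for this throw.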
3054 int pc_for_athrow_offset = __ offset(); 3055 InternalAddress pc_for_athrow(__ pc()); 3056 __ lea(exceptionPC->as_register(), pc_for_athrow); 3057 add_call_info(pc_for_athrow_offset, info); // for exception handler 3058 3059 __ verify_not_null_oop(rax); 3060 // search an exception handler (rax: exception oop, rdx: throwing pc) 3061 if (compilation()->has_fpu_code()) { 3062 unwind_id = C1StubId::handle_exception_id; 3063 } else { 3064 unwind_id = C1StubId::handle_exception_nofpu_id; 3065 } 3066 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 3067 3068 // enough room for two byte trap 3069 __ nop(); 3070 } 3071 3072 3073 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 3074 assert(exceptionOop->as_register() == rax, "must match"); 3075 3076 __ jmp(_unwind_handler_entry); 3077 } 3078 3079 3080 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 3081 3082 // optimized version for linear scan: 3083 // * count must be already in ECX (guaranteed by LinearScan) 3084 // * left and dest must be equal 3085 // * tmp must be unused 3086 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 3087 assert(left == dest, "left and dest must be equal"); 3088 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 3089 3090 if (left->is_single_cpu()) { 3091 Register value = left->as_register(); 3092 assert(value != SHIFT_count, "left cannot be ECX"); 3093 3094 switch (code) { 3095 case lir_shl: __ shll(value); break; 3096 case lir_shr: __ sarl(value); break; 3097 case lir_ushr: __ shrl(value); break; 3098 default: ShouldNotReachHere(); 3099 } 3100 } else if (left->is_double_cpu()) { 3101 Register lo = left->as_register_lo(); 3102 Register hi = left->as_register_hi(); 3103 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 3104 #ifdef _LP64 3105 switch (code) { 3106 case lir_shl: __ shlptr(lo); break; 3107 case lir_shr: __ sarptr(lo); break; 3108 case lir_ushr: __ shrptr(lo); break; 3109 default: ShouldNotReachHere(); 3110 } 3111 #else 3112 3113 switch (code) { 3114 case lir_shl: __ lshl(hi, lo); break; 3115 case lir_shr: __ lshr(hi, lo, true); break; 3116 case lir_ushr: __ lshr(hi, lo, false); break; 3117 default: ShouldNotReachHere(); 3118 } 3119 #endif // LP64 3120 } else { 3121 ShouldNotReachHere(); 3122 } 3123 } 3124 3125 3126 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 3127 if (dest->is_single_cpu()) { 3128 // first move left into dest so that left is not destroyed by the shift 3129 Register value = dest->as_register(); 3130 count = count & 0x1F; // Java spec 3131 3132 move_regs(left->as_register(), value); 3133 switch (code) { 3134 case lir_shl: __ shll(value, count); break; 3135 case lir_shr: __ sarl(value, count); break; 3136 case lir_ushr: __ shrl(value, count); break; 3137 default: ShouldNotReachHere(); 3138 } 3139 } else if (dest->is_double_cpu()) { 3140 #ifndef _LP64 3141 Unimplemented(); 3142 #else 3143 // first move left into dest so that left is not destroyed by the shift 3144 Register value = dest->as_register_lo(); 3145 count = count & 0x1F; // Java spec 3146 3147 move_regs(left->as_register_lo(), value); 3148 switch (code) { 3149 case lir_shl: __ shlptr(value, count); break; 3150 case lir_shr: __ sarptr(value, count); break; 3151 case lir_ushr: __ shrptr(value, count); break; 3152 default: ShouldNotReachHere(); 3153 } 3154 #endif // _LP64 3155 } else { 3156 ShouldNotReachHere(); 3157 } 3158 } 3159 3160 3161 void 
LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 3162 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3163 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3164 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3165 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); 3166 } 3167 3168 3169 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 3170 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3171 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3172 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3173 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); 3174 } 3175 3176 3177 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 3178 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3179 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3180 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3181 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1); 3182 } 3183 3184 3185 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { 3186 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3187 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3188 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3189 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1); 3190 } 3191 3192 3193 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) { 3194 if (null_check) { 3195 __ testptr(obj, obj); 3196 __ jcc(Assembler::zero, *slow_path->entry()); 3197 } 3198 if (is_dest) { 3199 __ test_null_free_array_oop(obj, tmp, *slow_path->entry()); 3200 } else { 3201 __ test_flat_array_oop(obj, tmp, *slow_path->entry()); 3202 } 3203 } 3204 3205 3206 // This code replaces a call to arraycopy; no exception may 3207 // be thrown in this code, they must be thrown in the System.arraycopy 3208 // activation frame; we could save some checks if this would not be the case 3209 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 3210 ciArrayKlass* default_type = op->expected_type(); 3211 Register src = op->src()->as_register(); 3212 Register dst = op->dst()->as_register(); 3213 Register src_pos = op->src_pos()->as_register(); 3214 Register dst_pos = op->dst_pos()->as_register(); 3215 Register length = op->length()->as_register(); 3216 Register tmp = op->tmp()->as_register(); 3217 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3218 3219 CodeStub* stub = op->stub(); 3220 int flags = op->flags(); 3221 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 3222 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3223 3224 if (flags & LIR_OpArrayCopy::always_slow_path) { 3225 __ jmp(*stub->entry()); 3226 __ bind(*stub->continuation()); 3227 return; 3228 } 3229 3230 // if we don't know anything, just go through the generic arraycopy 3231 if (default_type == nullptr) { 3232 // save outgoing arguments on stack in case call to System.arraycopy is needed 3233 // HACK ALERT. This code used to push the parameters in a hardwired fashion 3234 // for interpreter calling conventions. Now we have to do it in new style conventions. 
3235 // For the moment until C1 gets the new register allocator I just force all the 3236 // args to the right place (except the register args) and then on the back side 3237 // reload the register args properly if we go slow path. Yuck 3238 3239 // These are proper for the calling convention 3240 store_parameter(length, 2); 3241 store_parameter(dst_pos, 1); 3242 store_parameter(dst, 0); 3243 3244 // these are just temporary placements until we need to reload 3245 store_parameter(src_pos, 3); 3246 store_parameter(src, 4); 3247 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) 3248 3249 address copyfunc_addr = StubRoutines::generic_arraycopy(); 3250 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 3251 3252 // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint 3253 #ifdef _LP64 3254 // The arguments are in java calling convention so we can trivially shift them to C 3255 // convention 3256 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 3257 __ mov(c_rarg0, j_rarg0); 3258 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 3259 __ mov(c_rarg1, j_rarg1); 3260 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); 3261 __ mov(c_rarg2, j_rarg2); 3262 assert_different_registers(c_rarg3, j_rarg4); 3263 __ mov(c_rarg3, j_rarg3); 3264 #ifdef _WIN64 3265 // Allocate abi space for args but be sure to keep stack aligned 3266 __ subptr(rsp, 6*wordSize); 3267 store_parameter(j_rarg4, 4); 3268 #ifndef PRODUCT 3269 if (PrintC1Statistics) { 3270 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3271 } 3272 #endif 3273 __ call(RuntimeAddress(copyfunc_addr)); 3274 __ addptr(rsp, 6*wordSize); 3275 #else 3276 __ mov(c_rarg4, j_rarg4); 3277 #ifndef PRODUCT 3278 if (PrintC1Statistics) { 3279 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3280 } 3281 #endif 3282 __ call(RuntimeAddress(copyfunc_addr)); 3283 #endif // _WIN64 3284 #else 3285 __ push(length); 3286 __ push(dst_pos); 3287 __ push(dst); 3288 __ push(src_pos); 3289 __ push(src); 3290 3291 #ifndef PRODUCT 3292 if (PrintC1Statistics) { 3293 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3294 } 3295 #endif 3296 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack 3297 3298 #endif // _LP64 3299 3300 __ testl(rax, rax); 3301 __ jcc(Assembler::equal, *stub->continuation()); 3302 3303 __ mov(tmp, rax); 3304 __ xorl(tmp, -1); 3305 3306 // Reload values from the stack so they are where the stub 3307 // expects them. 
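// rax is expected to be 0 if the stub copied everything; otherwise it should hold the bitwise
// complement of the number of elements already copied, so tmp (= ~rax, computed above) is used
// to advance src_pos/dst_pos and shrink length before taking the slow path.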
3308 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3309 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3310 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3311 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3312 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3313 3314 __ subl(length, tmp); 3315 __ addl(src_pos, tmp); 3316 __ addl(dst_pos, tmp); 3317 __ jmp(*stub->entry()); 3318 3319 __ bind(*stub->continuation()); 3320 return; 3321 } 3322 3323 // Handle inline type arrays 3324 if (flags & LIR_OpArrayCopy::src_inlinetype_check) { 3325 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check)); 3326 } 3327 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) { 3328 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check)); 3329 } 3330 3331 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 3332 3333 int elem_size = type2aelembytes(basic_type); 3334 Address::ScaleFactor scale; 3335 3336 switch (elem_size) { 3337 case 1 : 3338 scale = Address::times_1; 3339 break; 3340 case 2 : 3341 scale = Address::times_2; 3342 break; 3343 case 4 : 3344 scale = Address::times_4; 3345 break; 3346 case 8 : 3347 scale = Address::times_8; 3348 break; 3349 default: 3350 scale = Address::no_scale; 3351 ShouldNotReachHere(); 3352 } 3353 3354 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 3355 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 3356 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 3357 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 3358 3359 // length and pos's are all sign extended at this point on 64bit 3360 3361 // test for null 3362 if (flags & LIR_OpArrayCopy::src_null_check) { 3363 __ testptr(src, src); 3364 __ jcc(Assembler::zero, *stub->entry()); 3365 } 3366 if (flags & LIR_OpArrayCopy::dst_null_check) { 3367 __ testptr(dst, dst); 3368 __ jcc(Assembler::zero, *stub->entry()); 3369 } 3370 3371 // If the compiler was not able to prove that exact type of the source or the destination 3372 // of the arraycopy is an array type, check at runtime if the source or the destination is 3373 // an instance type. 
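// Klass::layout_helper() is negative for array klasses and >= _lh_neutral_value for instance
// klasses, so the greaterEqual branches below route a non-array source or destination to the stub.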
3374 if (flags & LIR_OpArrayCopy::type_check) { 3375 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3376 __ load_klass(tmp, dst, tmp_load_klass); 3377 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 3378 __ jcc(Assembler::greaterEqual, *stub->entry()); 3379 } 3380 3381 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3382 __ load_klass(tmp, src, tmp_load_klass); 3383 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 3384 __ jcc(Assembler::greaterEqual, *stub->entry()); 3385 } 3386 } 3387 3388 // check if negative 3389 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 3390 __ testl(src_pos, src_pos); 3391 __ jcc(Assembler::less, *stub->entry()); 3392 } 3393 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 3394 __ testl(dst_pos, dst_pos); 3395 __ jcc(Assembler::less, *stub->entry()); 3396 } 3397 3398 if (flags & LIR_OpArrayCopy::src_range_check) { 3399 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); 3400 __ cmpl(tmp, src_length_addr); 3401 __ jcc(Assembler::above, *stub->entry()); 3402 } 3403 if (flags & LIR_OpArrayCopy::dst_range_check) { 3404 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); 3405 __ cmpl(tmp, dst_length_addr); 3406 __ jcc(Assembler::above, *stub->entry()); 3407 } 3408 3409 if (flags & LIR_OpArrayCopy::length_positive_check) { 3410 __ testl(length, length); 3411 __ jcc(Assembler::less, *stub->entry()); 3412 } 3413 3414 #ifdef _LP64 3415 __ movl2ptr(src_pos, src_pos); //higher 32bits must be null 3416 __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null 3417 #endif 3418 3419 if (flags & LIR_OpArrayCopy::type_check) { 3420 // We don't know whether the array types are compatible 3421 if (basic_type != T_OBJECT) { 3422 // Simple test for basic type arrays 3423 if (UseCompressedClassPointers) { 3424 __ movl(tmp, src_klass_addr); 3425 __ cmpl(tmp, dst_klass_addr); 3426 } else { 3427 __ movptr(tmp, src_klass_addr); 3428 __ cmpptr(tmp, dst_klass_addr); 3429 } 3430 __ jcc(Assembler::notEqual, *stub->entry()); 3431 } else { 3432 // For object arrays, if src is a subclass of dst then we can 3433 // safely do the copy. 3434 Label cont, slow; 3435 3436 __ push(src); 3437 __ push(dst); 3438 3439 __ load_klass(src, src, tmp_load_klass); 3440 __ load_klass(dst, dst, tmp_load_klass); 3441 3442 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); 3443 3444 __ push(src); 3445 __ push(dst); 3446 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 3447 __ pop(dst); 3448 __ pop(src); 3449 3450 __ testl(src, src); 3451 __ jcc(Assembler::notEqual, cont); 3452 3453 __ bind(slow); 3454 __ pop(dst); 3455 __ pop(src); 3456 3457 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 3458 if (copyfunc_addr != nullptr) { // use stub if available 3459 // src is not a subclass of dst, so we have to do a 3460 // per-element check. 3461 3462 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 3463 if ((flags & mask) != mask) { 3464 // Check that both of them are object arrays.
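          // At this point at least one side is statically known to be an object array
          // (asserted below); the runtime check loads the klass of the side that is not
          // statically known and compares its layout helper against the objArray layout
          // helper, branching to the slow path on a mismatch.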
3465 assert(flags & mask, "one of the two should be known to be an object array"); 3466 3467 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3468 __ load_klass(tmp, src, tmp_load_klass); 3469 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3470 __ load_klass(tmp, dst, tmp_load_klass); 3471 } 3472 int lh_offset = in_bytes(Klass::layout_helper_offset()); 3473 Address klass_lh_addr(tmp, lh_offset); 3474 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 3475 __ cmpl(klass_lh_addr, objArray_lh); 3476 __ jcc(Assembler::notEqual, *stub->entry()); 3477 } 3478 3479 // Spill because stubs can use any register they like and it's 3480 // easier to restore just those that we care about. 3481 store_parameter(dst, 0); 3482 store_parameter(dst_pos, 1); 3483 store_parameter(length, 2); 3484 store_parameter(src_pos, 3); 3485 store_parameter(src, 4); 3486 3487 #ifndef _LP64 3488 __ movptr(tmp, dst_klass_addr); 3489 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); 3490 __ push(tmp); 3491 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset())); 3492 __ push(tmp); 3493 __ push(length); 3494 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3495 __ push(tmp); 3496 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3497 __ push(tmp); 3498 3499 __ call_VM_leaf(copyfunc_addr, 5); 3500 #else 3501 __ movl2ptr(length, length); //higher 32bits must be null 3502 3503 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3504 assert_different_registers(c_rarg0, dst, dst_pos, length); 3505 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3506 assert_different_registers(c_rarg1, dst, length); 3507 3508 __ mov(c_rarg2, length); 3509 assert_different_registers(c_rarg2, dst); 3510 3511 #ifdef _WIN64 3512 // Allocate abi space for args but be sure to keep stack aligned 3513 __ subptr(rsp, 6*wordSize); 3514 __ load_klass(c_rarg3, dst, tmp_load_klass); 3515 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); 3516 store_parameter(c_rarg3, 4); 3517 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); 3518 __ call(RuntimeAddress(copyfunc_addr)); 3519 __ addptr(rsp, 6*wordSize); 3520 #else 3521 __ load_klass(c_rarg4, dst, tmp_load_klass); 3522 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 3523 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 3524 __ call(RuntimeAddress(copyfunc_addr)); 3525 #endif 3526 3527 #endif 3528 3529 #ifndef PRODUCT 3530 if (PrintC1Statistics) { 3531 Label failed; 3532 __ testl(rax, rax); 3533 __ jcc(Assembler::notZero, failed); 3534 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1); 3535 __ bind(failed); 3536 } 3537 #endif 3538 3539 __ testl(rax, rax); 3540 __ jcc(Assembler::zero, *stub->continuation()); 3541 3542 #ifndef PRODUCT 3543 if (PrintC1Statistics) { 3544 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1); 3545 } 3546 #endif 3547 3548 __ mov(tmp, rax); 3549 3550 __ xorl(tmp, -1); 3551 3552 // Restore previously spilled arguments 3553 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3554 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3555 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3556 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3557 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3558 3559 3560 __ 
subl(length, tmp); 3561 __ addl(src_pos, tmp); 3562 __ addl(dst_pos, tmp); 3563 } 3564 3565 __ jmp(*stub->entry()); 3566 3567 __ bind(cont); 3568 __ pop(dst); 3569 __ pop(src); 3570 } 3571 } 3572 3573 #ifdef ASSERT 3574 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 3575 // Sanity check the known type with the incoming class. For the 3576 // primitive case the types must match exactly with src.klass and 3577 // dst.klass each exactly matching the default type. For the 3578 // object array case, if no type check is needed then either the 3579 // dst type is exactly the expected type and the src type is a 3580 // subtype which we can't check or src is the same array as dst 3581 // but not necessarily exactly of type default_type. 3582 Label known_ok, halt; 3583 __ mov_metadata(tmp, default_type->constant_encoding()); 3584 #ifdef _LP64 3585 if (UseCompressedClassPointers) { 3586 __ encode_klass_not_null(tmp, rscratch1); 3587 } 3588 #endif 3589 3590 if (basic_type != T_OBJECT) { 3591 3592 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3593 else __ cmpptr(tmp, dst_klass_addr); 3594 __ jcc(Assembler::notEqual, halt); 3595 if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); 3596 else __ cmpptr(tmp, src_klass_addr); 3597 __ jcc(Assembler::equal, known_ok); 3598 } else { 3599 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3600 else __ cmpptr(tmp, dst_klass_addr); 3601 __ jcc(Assembler::equal, known_ok); 3602 __ cmpptr(src, dst); 3603 __ jcc(Assembler::equal, known_ok); 3604 } 3605 __ bind(halt); 3606 __ stop("incorrect type information in arraycopy"); 3607 __ bind(known_ok); 3608 } 3609 #endif 3610 3611 #ifndef PRODUCT 3612 if (PrintC1Statistics) { 3613 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1); 3614 } 3615 #endif 3616 3617 #ifdef _LP64 3618 assert_different_registers(c_rarg0, dst, dst_pos, length); 3619 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3620 assert_different_registers(c_rarg1, length); 3621 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3622 __ mov(c_rarg2, length); 3623 3624 #else 3625 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3626 store_parameter(tmp, 0); 3627 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3628 store_parameter(tmp, 1); 3629 store_parameter(length, 2); 3630 #endif // _LP64 3631 3632 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 3633 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 3634 const char *name; 3635 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 3636 __ call_VM_leaf(entry, 0); 3637 3638 if (stub != nullptr) { 3639 __ bind(*stub->continuation()); 3640 } 3641 } 3642 3643 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3644 assert(op->crc()->is_single_cpu(), "crc must be register"); 3645 assert(op->val()->is_single_cpu(), "byte value must be register"); 3646 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3647 Register crc = op->crc()->as_register(); 3648 Register val = op->val()->as_register(); 3649 Register res = op->result_opr()->as_register(); 3650 3651 assert_different_registers(val, crc, res); 3652 3653 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 3654 __ notl(crc); // ~crc 3655 __ update_byte_crc32(crc, val, res); 3656 __ 
notl(crc); // ~crc 3657 __ mov(res, crc); 3658 } 3659 3660 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 3661 Register obj = op->obj_opr()->as_register(); // may not be an oop 3662 Register hdr = op->hdr_opr()->as_register(); 3663 Register lock = op->lock_opr()->as_register(); 3664 if (LockingMode == LM_MONITOR) { 3665 if (op->info() != nullptr) { 3666 add_debug_info_for_null_check_here(op->info()); 3667 __ null_check(obj); 3668 } 3669 __ jmp(*op->stub()->entry()); 3670 } else if (op->code() == lir_lock) { 3671 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3672 Register tmp = LockingMode == LM_LIGHTWEIGHT ? op->scratch_opr()->as_register() : noreg; 3673 // add debug info for NullPointerException only if one is possible 3674 int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); 3675 if (op->info() != nullptr) { 3676 add_debug_info_for_null_check(null_check_offset, op->info()); 3677 } 3678 // done 3679 } else if (op->code() == lir_unlock) { 3680 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3681 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3682 } else { 3683 Unimplemented(); 3684 } 3685 __ bind(*op->stub()->continuation()); 3686 } 3687 3688 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 3689 Register obj = op->obj()->as_pointer_register(); 3690 Register result = op->result_opr()->as_pointer_register(); 3691 3692 CodeEmitInfo* info = op->info(); 3693 if (info != nullptr) { 3694 add_debug_info_for_null_check_here(info); 3695 } 3696 3697 #ifdef _LP64 3698 if (UseCompressedClassPointers) { 3699 __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes())); 3700 __ decode_klass_not_null(result, rscratch1); 3701 } else 3702 #endif 3703 __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes())); 3704 } 3705 3706 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3707 ciMethod* method = op->profiled_method(); 3708 int bci = op->profiled_bci(); 3709 ciMethod* callee = op->profiled_callee(); 3710 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3711 3712 // Update counter for all call types 3713 ciMethodData* md = method->method_data_or_null(); 3714 assert(md != nullptr, "Sanity"); 3715 ciProfileData* data = md->bci_to_data(bci); 3716 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 3717 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3718 Register mdo = op->mdo()->as_register(); 3719 __ mov_metadata(mdo, md->constant_encoding()); 3720 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3721 // Perform additional virtual call profiling for invokevirtual and 3722 // invokeinterface bytecodes 3723 if (op->should_profile_receiver_type()) { 3724 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3725 Register recv = op->recv()->as_register(); 3726 assert_different_registers(mdo, recv); 3727 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3728 ciKlass* known_klass = op->known_holder(); 3729 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 3730 // We know the type that will be seen at this call site; we can 3731 // statically update the MethodData* rather than needing to do 3732 // dynamic tests on the receiver type 3733 3734 // NOTE: we should probably put a lock around this search to 3735 // avoid collisions by concurrent compilations 3736 ciVirtualCallData* 
vc_data = (ciVirtualCallData*) data; 3737 uint i; 3738 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3739 ciKlass* receiver = vc_data->receiver(i); 3740 if (known_klass->equals(receiver)) { 3741 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3742 __ addptr(data_addr, DataLayout::counter_increment); 3743 return; 3744 } 3745 } 3746 3747 // Receiver type not found in profile data; select an empty slot 3748 3749 // Note that this is less efficient than it should be because it 3750 // always does a write to the receiver part of the 3751 // VirtualCallData rather than just the first time 3752 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3753 ciKlass* receiver = vc_data->receiver(i); 3754 if (receiver == nullptr) { 3755 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3756 __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1); 3757 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3758 __ addptr(data_addr, DataLayout::counter_increment); 3759 return; 3760 } 3761 } 3762 } else { 3763 __ load_klass(recv, recv, tmp_load_klass); 3764 Label update_done; 3765 type_profile_helper(mdo, md, data, recv, &update_done); 3766 // Receiver did not match any saved receiver and there is no empty row for it. 3767 // Increment total counter to indicate polymorphic case. 3768 __ addptr(counter_addr, DataLayout::counter_increment); 3769 3770 __ bind(update_done); 3771 } 3772 } else { 3773 // Static call 3774 __ addptr(counter_addr, DataLayout::counter_increment); 3775 } 3776 } 3777 3778 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3779 Register obj = op->obj()->as_register(); 3780 Register tmp = op->tmp()->as_pointer_register(); 3781 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3782 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3783 ciKlass* exact_klass = op->exact_klass(); 3784 intptr_t current_klass = op->current_klass(); 3785 bool not_null = op->not_null(); 3786 bool no_conflict = op->no_conflict(); 3787 3788 Label update, next, none; 3789 3790 bool do_null = !not_null; 3791 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3792 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3793 3794 assert(do_null || do_update, "why are we here?"); 3795 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3796 3797 __ verify_oop(obj); 3798 3799 #ifdef ASSERT 3800 if (obj == tmp) { 3801 #ifdef _LP64 3802 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index()); 3803 #else 3804 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index()); 3805 #endif 3806 } else { 3807 #ifdef _LP64 3808 assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index()); 3809 #else 3810 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index()); 3811 #endif 3812 } 3813 #endif 3814 if (do_null) { 3815 __ testptr(obj, obj); 3816 __ jccb(Assembler::notZero, update); 3817 if (!TypeEntries::was_null_seen(current_klass)) { 3818 __ testptr(mdo_addr, TypeEntries::null_seen); 3819 #ifndef ASSERT 3820 __ jccb(Assembler::notZero, next); // already set 3821 #else 3822 __ jcc(Assembler::notZero, next); // already set 3823 #endif 3824 // atomic update to prevent overwriting Klass* with 0 3825 __ lock(); 3826 __ orptr(mdo_addr, TypeEntries::null_seen); 3827 } 
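    // Note: the #ifdef ASSERT variants above and below use the long jcc/jmp encodings
    // because debug builds emit extra verification code (including the "unexpected null
    // obj" check below) between here and the 'next' label, which could exceed the 8-bit
    // displacement of the short jccb/jmpb forms used in product builds.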
3828 if (do_update) { 3829 #ifndef ASSERT 3830 __ jmpb(next); 3831 } 3832 #else 3833 __ jmp(next); 3834 } 3835 } else { 3836 __ testptr(obj, obj); 3837 __ jcc(Assembler::notZero, update); 3838 __ stop("unexpected null obj"); 3839 #endif 3840 } 3841 3842 __ bind(update); 3843 3844 if (do_update) { 3845 #ifdef ASSERT 3846 if (exact_klass != nullptr) { 3847 Label ok; 3848 __ load_klass(tmp, obj, tmp_load_klass); 3849 __ push(tmp); 3850 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3851 __ cmpptr(tmp, Address(rsp, 0)); 3852 __ jcc(Assembler::equal, ok); 3853 __ stop("exact klass and actual klass differ"); 3854 __ bind(ok); 3855 __ pop(tmp); 3856 } 3857 #endif 3858 if (!no_conflict) { 3859 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 3860 if (exact_klass != nullptr) { 3861 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3862 } else { 3863 __ load_klass(tmp, obj, tmp_load_klass); 3864 } 3865 #ifdef _LP64 3866 __ mov(rscratch1, tmp); // save original value before XOR 3867 #endif 3868 __ xorptr(tmp, mdo_addr); 3869 __ testptr(tmp, TypeEntries::type_klass_mask); 3870 // klass seen before, nothing to do. The unknown bit may have been 3871 // set already but no need to check. 3872 __ jccb(Assembler::zero, next); 3873 3874 __ testptr(tmp, TypeEntries::type_unknown); 3875 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3876 3877 if (TypeEntries::is_type_none(current_klass)) { 3878 __ testptr(mdo_addr, TypeEntries::type_mask); 3879 __ jccb(Assembler::zero, none); 3880 #ifdef _LP64 3881 // There is a chance that the checks above (re-reading profiling 3882 // data from memory) fail if another thread has just set the 3883 // profiling to this obj's klass 3884 __ mov(tmp, rscratch1); // get back original value before XOR 3885 __ xorptr(tmp, mdo_addr); 3886 __ testptr(tmp, TypeEntries::type_klass_mask); 3887 __ jccb(Assembler::zero, next); 3888 #endif 3889 } 3890 } else { 3891 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3892 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3893 3894 __ testptr(mdo_addr, TypeEntries::type_unknown); 3895 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3896 } 3897 3898 // different than before. Cannot keep accurate profile. 3899 __ orptr(mdo_addr, TypeEntries::type_unknown); 3900 3901 if (TypeEntries::is_type_none(current_klass)) { 3902 __ jmpb(next); 3903 3904 __ bind(none); 3905 // first time here. Set profile type. 
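        // tmp still holds the klass XORed with the current profile cell contents; the
        // type_mask test above saw zero, so at most the null_seen status bit can be set
        // in the cell, and the plain store below records the klass while preserving an
        // already-set null_seen flag.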
3906 __ movptr(mdo_addr, tmp); 3907 #ifdef ASSERT 3908 __ andptr(tmp, TypeEntries::type_klass_mask); 3909 __ verify_klass_ptr(tmp); 3910 #endif 3911 } 3912 } else { 3913 // There's a single possible klass at this profile point 3914 assert(exact_klass != nullptr, "should be"); 3915 if (TypeEntries::is_type_none(current_klass)) { 3916 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3917 __ xorptr(tmp, mdo_addr); 3918 __ testptr(tmp, TypeEntries::type_klass_mask); 3919 #ifdef ASSERT 3920 __ jcc(Assembler::zero, next); 3921 3922 { 3923 Label ok; 3924 __ push(tmp); 3925 __ testptr(mdo_addr, TypeEntries::type_mask); 3926 __ jcc(Assembler::zero, ok); 3927 // may have been set by another thread 3928 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3929 __ xorptr(tmp, mdo_addr); 3930 __ testptr(tmp, TypeEntries::type_mask); 3931 __ jcc(Assembler::zero, ok); 3932 3933 __ stop("unexpected profiling mismatch"); 3934 __ bind(ok); 3935 __ pop(tmp); 3936 } 3937 #else 3938 __ jccb(Assembler::zero, next); 3939 #endif 3940 // first time here. Set profile type. 3941 __ movptr(mdo_addr, tmp); 3942 #ifdef ASSERT 3943 __ andptr(tmp, TypeEntries::type_klass_mask); 3944 __ verify_klass_ptr(tmp); 3945 #endif 3946 } else { 3947 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3948 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3949 3950 __ testptr(mdo_addr, TypeEntries::type_unknown); 3951 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3952 3953 __ orptr(mdo_addr, TypeEntries::type_unknown); 3954 } 3955 } 3956 } 3957 __ bind(next); 3958 } 3959 3960 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) { 3961 Register obj = op->obj()->as_register(); 3962 Register tmp = op->tmp()->as_pointer_register(); 3963 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3964 bool not_null = op->not_null(); 3965 int flag = op->flag(); 3966 3967 Label not_inline_type; 3968 if (!not_null) { 3969 __ testptr(obj, obj); 3970 __ jccb(Assembler::zero, not_inline_type); 3971 } 3972 3973 __ test_oop_is_not_inline_type(obj, tmp, not_inline_type); 3974 3975 __ orb(mdo_addr, flag); 3976 3977 __ bind(not_inline_type); 3978 } 3979 3980 void LIR_Assembler::emit_delay(LIR_OpDelay*) { 3981 Unimplemented(); 3982 } 3983 3984 3985 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 3986 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 3987 } 3988 3989 3990 void LIR_Assembler::align_backward_branch_target() { 3991 __ align(BytesPerWord); 3992 } 3993 3994 3995 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { 3996 if (left->is_single_cpu()) { 3997 __ negl(left->as_register()); 3998 move_regs(left->as_register(), dest->as_register()); 3999 4000 } else if (left->is_double_cpu()) { 4001 Register lo = left->as_register_lo(); 4002 #ifdef _LP64 4003 Register dst = dest->as_register_lo(); 4004 __ movptr(dst, lo); 4005 __ negptr(dst); 4006 #else 4007 Register hi = left->as_register_hi(); 4008 __ lneg(hi, lo); 4009 if (dest->as_register_lo() == hi) { 4010 assert(dest->as_register_hi() != lo, "destroying register"); 4011 move_regs(hi, dest->as_register_hi()); 4012 move_regs(lo, dest->as_register_lo()); 4013 } else { 4014 move_regs(lo, dest->as_register_lo()); 4015 move_regs(hi, dest->as_register_hi()); 4016 } 4017 #endif // _LP64 4018 4019 } else if (dest->is_single_xmm()) { 4020 #ifdef _LP64 4021 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 4022 assert(tmp->is_valid(), 
"need temporary"); 4023 assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 4024 __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2); 4025 } 4026 else 4027 #endif 4028 { 4029 assert(!tmp->is_valid(), "do not need temporary"); 4030 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { 4031 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); 4032 } 4033 __ xorps(dest->as_xmm_float_reg(), 4034 ExternalAddress((address)float_signflip_pool), 4035 rscratch1); 4036 } 4037 } else if (dest->is_double_xmm()) { 4038 #ifdef _LP64 4039 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 4040 assert(tmp->is_valid(), "need temporary"); 4041 assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg()); 4042 __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2); 4043 } 4044 else 4045 #endif 4046 { 4047 assert(!tmp->is_valid(), "do not need temporary"); 4048 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) { 4049 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); 4050 } 4051 __ xorpd(dest->as_xmm_double_reg(), 4052 ExternalAddress((address)double_signflip_pool), 4053 rscratch1); 4054 } 4055 #ifndef _LP64 4056 } else if (left->is_single_fpu() || left->is_double_fpu()) { 4057 assert(left->fpu() == 0, "arg must be on TOS"); 4058 assert(dest->fpu() == 0, "dest must be TOS"); 4059 __ fchs(); 4060 #endif // !_LP64 4061 4062 } else { 4063 ShouldNotReachHere(); 4064 } 4065 } 4066 4067 4068 void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 4069 assert(src->is_address(), "must be an address"); 4070 assert(dest->is_register(), "must be a register"); 4071 4072 PatchingStub* patch = nullptr; 4073 if (patch_code != lir_patch_none) { 4074 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 4075 } 4076 4077 Register reg = dest->as_pointer_register(); 4078 LIR_Address* addr = src->as_address_ptr(); 4079 __ lea(reg, as_Address(addr)); 4080 4081 if (patch != nullptr) { 4082 patching_epilog(patch, patch_code, addr->base()->as_register(), info); 4083 } 4084 } 4085 4086 4087 4088 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 4089 assert(!tmp->is_valid(), "don't need temporary"); 4090 __ call(RuntimeAddress(dest)); 4091 if (info != nullptr) { 4092 add_call_info_here(info); 4093 } 4094 __ post_call_nop(); 4095 } 4096 4097 4098 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 4099 assert(type == T_LONG, "only for volatile long fields"); 4100 4101 if (info != nullptr) { 4102 add_debug_info_for_null_check_here(info); 4103 } 4104 4105 if (src->is_double_xmm()) { 4106 if (dest->is_double_cpu()) { 4107 #ifdef _LP64 4108 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); 4109 #else 4110 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); 4111 __ psrlq(src->as_xmm_double_reg(), 32); 4112 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); 4113 #endif // _LP64 4114 } else if (dest->is_double_stack()) { 4115 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); 4116 } else if (dest->is_address()) { 4117 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg()); 4118 } else { 4119 ShouldNotReachHere(); 4120 } 4121 4122 } else if (dest->is_double_xmm()) { 4123 if (src->is_double_stack()) { 4124 __ 
movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix())); 4125 } else if (src->is_address()) { 4126 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr())); 4127 } else { 4128 ShouldNotReachHere(); 4129 } 4130 4131 #ifndef _LP64 4132 } else if (src->is_double_fpu()) { 4133 assert(src->fpu_regnrLo() == 0, "must be TOS"); 4134 if (dest->is_double_stack()) { 4135 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix())); 4136 } else if (dest->is_address()) { 4137 __ fistp_d(as_Address(dest->as_address_ptr())); 4138 } else { 4139 ShouldNotReachHere(); 4140 } 4141 4142 } else if (dest->is_double_fpu()) { 4143 assert(dest->fpu_regnrLo() == 0, "must be TOS"); 4144 if (src->is_double_stack()) { 4145 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix())); 4146 } else if (src->is_address()) { 4147 __ fild_d(as_Address(src->as_address_ptr())); 4148 } else { 4149 ShouldNotReachHere(); 4150 } 4151 #endif // !_LP64 4152 4153 } else { 4154 ShouldNotReachHere(); 4155 } 4156 } 4157 4158 #ifdef ASSERT 4159 // emit run-time assertion 4160 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 4161 assert(op->code() == lir_assert, "must be"); 4162 4163 if (op->in_opr1()->is_valid()) { 4164 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 4165 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 4166 } else { 4167 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 4168 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 4169 } 4170 4171 Label ok; 4172 if (op->condition() != lir_cond_always) { 4173 Assembler::Condition acond = Assembler::zero; 4174 switch (op->condition()) { 4175 case lir_cond_equal: acond = Assembler::equal; break; 4176 case lir_cond_notEqual: acond = Assembler::notEqual; break; 4177 case lir_cond_less: acond = Assembler::less; break; 4178 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 4179 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 4180 case lir_cond_greater: acond = Assembler::greater; break; 4181 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 4182 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 4183 default: ShouldNotReachHere(); 4184 } 4185 __ jcc(acond, ok); 4186 } 4187 if (op->halt()) { 4188 const char* str = __ code_string(op->msg()); 4189 __ stop(str); 4190 } else { 4191 breakpoint(); 4192 } 4193 __ bind(ok); 4194 } 4195 #endif 4196 4197 void LIR_Assembler::membar() { 4198 // QQQ sparc TSO uses this, 4199 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4200 } 4201 4202 void LIR_Assembler::membar_acquire() { 4203 // No x86 machines currently require load fences 4204 } 4205 4206 void LIR_Assembler::membar_release() { 4207 // No x86 machines currently require store fences 4208 } 4209 4210 void LIR_Assembler::membar_loadload() { 4211 // no-op 4212 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 4213 } 4214 4215 void LIR_Assembler::membar_storestore() { 4216 // no-op 4217 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 4218 } 4219 4220 void LIR_Assembler::membar_loadstore() { 4221 // no-op 4222 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 4223 } 4224 4225 void LIR_Assembler::membar_storeload() { 4226 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 4227 } 4228 4229 void LIR_Assembler::on_spin_wait() { 4230 __ pause (); 4231 } 4232 4233 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 4234 
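  // In 64-bit HotSpot the current JavaThread* is kept permanently in r15 (r15_thread),
  // so fetching it is a plain register move; 32-bit builds have no reserved thread
  // register and go through MacroAssembler::get_thread() instead.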
assert(result_reg->is_register(), "check"); 4235 #ifdef _LP64 4236 // __ get_thread(result_reg->as_register_lo()); 4237 __ mov(result_reg->as_register(), r15_thread); 4238 #else 4239 __ get_thread(result_reg->as_register()); 4240 #endif // _LP64 4241 } 4242 4243 void LIR_Assembler::check_orig_pc() { 4244 __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD); 4245 } 4246 4247 void LIR_Assembler::peephole(LIR_List*) { 4248 // do nothing for now 4249 } 4250 4251 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 4252 assert(data == dest, "xchg/xadd uses only 2 operands"); 4253 4254 if (data->type() == T_INT) { 4255 if (code == lir_xadd) { 4256 __ lock(); 4257 __ xaddl(as_Address(src->as_address_ptr()), data->as_register()); 4258 } else { 4259 __ xchgl(data->as_register(), as_Address(src->as_address_ptr())); 4260 } 4261 } else if (data->is_oop()) { 4262 assert (code == lir_xchg, "xadd for oops"); 4263 Register obj = data->as_register(); 4264 #ifdef _LP64 4265 if (UseCompressedOops) { 4266 __ encode_heap_oop(obj); 4267 __ xchgl(obj, as_Address(src->as_address_ptr())); 4268 __ decode_heap_oop(obj); 4269 } else { 4270 __ xchgptr(obj, as_Address(src->as_address_ptr())); 4271 } 4272 #else 4273 __ xchgl(obj, as_Address(src->as_address_ptr())); 4274 #endif 4275 } else if (data->type() == T_LONG) { 4276 #ifdef _LP64 4277 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 4278 if (code == lir_xadd) { 4279 __ lock(); 4280 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo()); 4281 } else { 4282 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr())); 4283 } 4284 #else 4285 ShouldNotReachHere(); 4286 #endif 4287 } else { 4288 ShouldNotReachHere(); 4289 } 4290 } 4291 4292 #undef __