/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],        CONST64(0x7FFFFFFF7FFFFFFF),        CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],        CONST64(0x7FFFFFFFFFFFFFFF),        CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));
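// Usage sketch (an assumption about the consumers of the pools above, which
// live elsewhere in this file): NegF/NegD xor the operand with the matching
// signflip mask to flip the sign bit, and AbsF/AbsD and it with the matching
// signmask to clear the sign bit.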

NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

#ifndef _LP64
void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}
#endif // !_LP64

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.
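  //
  // Layout example (illustrative numbers, not emitted code): with
  // max_locals == 2 and number_of_locks == 2, monitor_offset below is
  // 2*BytesPerWord + 1*(BasicObjectLock::size()*BytesPerWord), and the
  // entry for monitor i starts at monitor_offset - i*2*BytesPerWord,
  // lock word first and object oop one word above it.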

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;
  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
  assert(thread != noreg, "x86_32 not implemented");

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!
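  // Worked example (hypothetical LP64 numbers): framesize() == 16 slots,
  // slots_per_word == 2 and stack_slot_size == 4 give (16 - 4) * 4 = 48
  // bytes of rsp decrement.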

  // The frame_map records size in slots (32-bit words)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = nullptr;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    if (LockingMode == LM_MONITOR) {
      __ jmp(*stub->entry());
    } else {
      __ unlock_object(rdi, rsi, rax, *stub->entry());
    }
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != nullptr) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // generate code for the deopt handler
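  // (The stub pushes its own address and jumps to the deopt blob's unpack
  // entry, so the blob sees a return address that identifies this nmethod.)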
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr(), rscratch1);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }
  if (InlineTypeReturnedAsFields) {
#ifndef _LP64
    Unimplemented();
#endif
    // Check if we are returning a non-null inline type and load its fields into registers
    ciType* return_type = compilation()->method()->return_type();
    if (return_type->is_inlinetype()) {
      ciInlineKlass* vk = return_type->as_inline_klass();
      if (vk->can_be_returned_as_fields()) {
        address unpack_handler = vk->unpack_handler();
        assert(unpack_handler != nullptr, "must be");
        __ call(RuntimeAddress(unpack_handler));
      }
    } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
      Label skip;
      __ test_oop_is_not_inline_type(rax, rscratch1, skip);

      // Load fields from a buffered value with an inline class specific handler
      __ load_klass(rdi, rax, rscratch1);
      __ movptr(rdi, Address(rdi, InstanceKlass::adr_inlineklass_fixed_block_offset()));
      __ movptr(rdi, Address(rdi, InlineKlass::unpack_handler_offset()));
      // Unpack handler can be null if inline type is not scalarizable in returns
      __ testptr(rdi, rdi);
      __ jcc(Assembler::zero, skip);
      __ call(rdi);

      __ bind(skip);
    }
    // At this point, rax points to the value object (for interpreter or C1 caller).
    // The fields of the object are copied into registers (for C2 caller).
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers

#ifdef _LP64
  const Register thread = r15_thread;
#else
  const Register thread = rbx;
  __ get_thread(thread);
#endif
  code_stub->set_safepoint_offset(__ offset());
  __ relocate(relocInfo::poll_return_type);
  __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
  __ ret(0);
}


int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
  return (__ store_inline_type_fields_to_buf(vk, false));
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  guarantee(info != nullptr, "Shouldn't be null");
  int offset = __ offset();
#ifdef _LP64
  const Register poll_addr = rscratch1;
  __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
#else
  assert(tmp->is_cpu_register(), "needed");
  const Register poll_addr = tmp->as_register();
  __ get_thread(poll_addr);
  __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
#endif
  add_debug_info_for_branch(info);
  __ relocate(relocInfo::poll_type);
  address pre_pc = __ pc();
  __ testl(rax, Address(poll_addr, 0));
  address post_pc = __ pc();
  guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
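          // (xorps reg,reg is the usual self-zeroing idiom; it avoids a
          // constant-table load)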
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
#else
        ShouldNotReachHere();
#endif // !_LP64
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes),
                (intptr_t)c->as_jlong_bits(),
                rscratch1);
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == nullptr) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject(), noreg);
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32bit so this doesn't produce useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (is_reference_type(src->type())) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

#ifndef _LP64
    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
#endif // !_LP64

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

#ifndef _LP64
    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

#ifndef _LP64
  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)  __ fstp_s (dst_addr);
    else                __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)  __ fstp_d (dst_addr);
    else                __ fst_d  (dst_addr);
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = nullptr;
  Register compressed_src = rscratch1;

  if (is_reference_type(type)) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
#ifdef _LP64
      assert(src->is_single_xmm(), "not a float");
      __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
#else
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_s(as_Address(to_addr));
        else                __ fst_s (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_DOUBLE: {
#ifdef _LP64
      assert(src->is_double_xmm(), "not a double");
      __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
#else
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)  __ fstp_d(as_Address(to_addr));
        else                __ fst_d (as_Address(to_addr));
      }
#endif // _LP64
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != nullptr) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

#ifndef _LP64
  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (is_reference_type(type)) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      // no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
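        // (On those CPUs, reading a full register shortly after writing
        // only part of it stalls the pipeline; pre-clearing avoids that.)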
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
  }

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
#ifndef _LP64
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == nullptr && patch == nullptr, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != nullptr) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif

    if (!(UseZGC && !ZGenerational)) {
      // Load barrier has not yet been applied, so ZGC can't verify the oop here
      __ verify_oop(dest->as_register());
    }
  }
}


NEEDS_CLEANUP; // This could be static?
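// Maps the element size in bytes to the SIB scale used in address
// computation, e.g. T_INT (4 bytes) -> Address::times_4.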
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label");
  if (op->block() != nullptr)  _branch_target_blocks.append(op->block());
  if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != nullptr) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != nullptr, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond, *(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
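      // (a 32-bit mov implicitly zeroes the upper half of the destination,
      // which is exactly the truncation l2i requires)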
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


#ifdef _LP64
    case Bytecodes::_f2d:
      __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2f:
      __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_i2f:
      __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
      __ convert_f2i(dest->as_register(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2i:
      __ convert_d2i(dest->as_register(), src->as_xmm_double_reg());
      break;

    case Bytecodes::_f2l:
      __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg());
      break;

    case Bytecodes::_d2l:
      __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg());
      break;
#else
    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");
      __ movptr(Address(rsp, 0),          src->as_register_lo());
      __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
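        // (the control word was switched to truncating rounding above;
        // restore the VM's default mode before continuing)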
        __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
      }
      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != nullptr, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;
#endif // _LP64

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath || op->is_null_free() ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
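    // (Profile rows are (receiver klass, count) pairs in the MDO; a match
    // bumps the count cell that sits next to the matching receiver cell.)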
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // check if it needs to be profiled
  ciMethodData* md = nullptr;
  ciProfileData* data = nullptr;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != nullptr, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != nullptr, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != nullptr, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label* success_target = success;
  Label* failure_target = failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->need_null_check()) {
    __ testptr(obj, obj);
    if (op->should_profile()) {
      Label not_null;
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
      int header_bits = BitData::null_seen_byte_constant();
      __ orb(data_addr, header_bits);
      __ jmp(*obj_is_null);
      __ bind(not_null);

      Label update_done;
      Register recv = k_RInfo;
      __ load_klass(recv, obj, tmp_load_klass);
      type_profile_helper(mdo, md, data, recv, &update_done);

      Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment);

      __ bind(update_done);
    } else {
      __ jcc(Assembler::equal, *obj_is_null);
    }
1778 } 1779 1780 if (!k->is_loaded()) { 1781 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1782 } else { 1783 #ifdef _LP64 1784 __ mov_metadata(k_RInfo, k->constant_encoding()); 1785 #endif // _LP64 1786 } 1787 __ verify_oop(obj); 1788 1789 if (op->fast_check()) { 1790 // get object class 1791 // not a safepoint as obj null check happens earlier 1792 #ifdef _LP64 1793 if (UseCompressedClassPointers) { 1794 __ load_klass(Rtmp1, obj, tmp_load_klass); 1795 __ cmpptr(k_RInfo, Rtmp1); 1796 } else { 1797 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1798 } 1799 #else 1800 if (k->is_loaded()) { 1801 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1802 } else { 1803 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1804 } 1805 #endif 1806 __ jcc(Assembler::notEqual, *failure_target); 1807 // successful cast, fall through to profile or jump 1808 } else { 1809 // get object class 1810 // not a safepoint as obj null check happens earlier 1811 __ load_klass(klass_RInfo, obj, tmp_load_klass); 1812 if (k->is_loaded()) { 1813 // See if we get an immediate positive hit 1814 #ifdef _LP64 1815 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1816 #else 1817 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1818 #endif // _LP64 1819 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1820 __ jcc(Assembler::notEqual, *failure_target); 1821 // successful cast, fall through to profile or jump 1822 } else { 1823 // See if we get an immediate positive hit 1824 __ jcc(Assembler::equal, *success_target); 1825 // check for self 1826 #ifdef _LP64 1827 __ cmpptr(klass_RInfo, k_RInfo); 1828 #else 1829 __ cmpklass(klass_RInfo, k->constant_encoding()); 1830 #endif // _LP64 1831 __ jcc(Assembler::equal, *success_target); 1832 1833 __ push(klass_RInfo); 1834 #ifdef _LP64 1835 __ push(k_RInfo); 1836 #else 1837 __ pushklass(k->constant_encoding(), noreg); 1838 #endif // _LP64 1839 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1840 __ pop(klass_RInfo); 1841 __ pop(klass_RInfo); 1842 // result is a boolean 1843 __ testl(klass_RInfo, klass_RInfo); 1844 __ jcc(Assembler::equal, *failure_target); 1845 // successful cast, fall through to profile or jump 1846 } 1847 } else { 1848 // perform the fast part of the checking logic 1849 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1850 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1851 __ push(klass_RInfo); 1852 __ push(k_RInfo); 1853 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1854 __ pop(klass_RInfo); 1855 __ pop(k_RInfo); 1856 // result is a boolean 1857 __ testl(k_RInfo, k_RInfo); 1858 __ jcc(Assembler::equal, *failure_target); 1859 // successful cast, fall through to profile or jump 1860 } 1861 } 1862 __ jmp(*success); 1863 } 1864 1865 1866 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1867 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1868 LIR_Code code = op->code(); 1869 if (code == lir_store_check) { 1870 Register value = op->object()->as_register(); 1871 Register array = op->array()->as_register(); 1872 Register k_RInfo = op->tmp1()->as_register(); 1873 Register klass_RInfo = op->tmp2()->as_register(); 1874 Register Rtmp1 = op->tmp3()->as_register(); 1875 1876 CodeStub* stub = op->stub(); 1877 1878 // check if it needs to be 
profiled 1879 ciMethodData* md = nullptr; 1880 ciProfileData* data = nullptr; 1881 1882 if (op->should_profile()) { 1883 ciMethod* method = op->profiled_method(); 1884 assert(method != nullptr, "Should have method"); 1885 int bci = op->profiled_bci(); 1886 md = method->method_data_or_null(); 1887 assert(md != nullptr, "Sanity"); 1888 data = md->bci_to_data(bci); 1889 assert(data != nullptr, "need data for type check"); 1890 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1891 } 1892 Label done; 1893 Label* success_target = &done; 1894 Label* failure_target = stub->entry(); 1895 1896 __ testptr(value, value); 1897 if (op->should_profile()) { 1898 Label not_null; 1899 Register mdo = klass_RInfo; 1900 __ mov_metadata(mdo, md->constant_encoding()); 1901 __ jccb(Assembler::notEqual, not_null); 1902 // Object is null; update MDO and exit 1903 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1904 int header_bits = BitData::null_seen_byte_constant(); 1905 __ orb(data_addr, header_bits); 1906 __ jmp(done); 1907 __ bind(not_null); 1908 1909 Label update_done; 1910 Register recv = k_RInfo; 1911 __ load_klass(recv, value, tmp_load_klass); 1912 type_profile_helper(mdo, md, data, recv, &update_done); 1913 1914 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1915 __ addptr(counter_addr, DataLayout::counter_increment); 1916 __ bind(update_done); 1917 } else { 1918 __ jcc(Assembler::equal, done); 1919 } 1920 1921 add_debug_info_for_null_check_here(op->info_for_exception()); 1922 __ load_klass(k_RInfo, array, tmp_load_klass); 1923 __ load_klass(klass_RInfo, value, tmp_load_klass); 1924 1925 // get instance klass (it's already uncompressed) 1926 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1927 // perform the fast part of the checking logic 1928 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1929 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1930 __ push(klass_RInfo); 1931 __ push(k_RInfo); 1932 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1933 __ pop(klass_RInfo); 1934 __ pop(k_RInfo); 1935 // result is a boolean 1936 __ testl(k_RInfo, k_RInfo); 1937 __ jcc(Assembler::equal, *failure_target); 1938 // fall through to the success case 1939 1940 __ bind(done); 1941 } else 1942 if (code == lir_checkcast) { 1943 Register obj = op->object()->as_register(); 1944 Register dst = op->result_opr()->as_register(); 1945 Label success; 1946 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1947 __ bind(success); 1948 if (dst != obj) { 1949 __ mov(dst, obj); 1950 } 1951 } else 1952 if (code == lir_instanceof) { 1953 Register obj = op->object()->as_register(); 1954 Register dst = op->result_opr()->as_register(); 1955 Label success, failure, done; 1956 emit_typecheck_helper(op, &success, &failure, &failure); 1957 __ bind(failure); 1958 __ xorptr(dst, dst); 1959 __ jmpb(done); 1960 __ bind(success); 1961 __ movptr(dst, 1); 1962 __ bind(done); 1963 } else { 1964 ShouldNotReachHere(); 1965 } 1966 1967 } 1968 1969 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) { 1970 // We are loading/storing from/to an array that *may* be a flat array (the 1971 // declared type is Object[], abstract[], interface[] or VT.ref[]). 1972 // If this array is a flat array, take the slow path. 
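// Two strategies are emitted below: with UseArrayMarkWordCheck the flat-array bit is tested directly in the object's mark word; otherwise the klass is loaded and the flat-array tag bit of its layout helper is tested.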
1973 Register klass = op->tmp()->as_register(); 1974 if (UseArrayMarkWordCheck) { 1975 __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry()); 1976 } else { 1977 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1978 __ load_klass(klass, op->array()->as_register(), tmp_load_klass); 1979 __ movl(klass, Address(klass, Klass::layout_helper_offset())); 1980 __ testl(klass, Klass::_lh_array_tag_flat_value_bit_inplace); 1981 __ jcc(Assembler::notZero, *op->stub()->entry()); 1982 } 1983 if (!op->value()->is_illegal()) { 1984 // The array is not a flat array, but it might be null-free. If we are storing 1985 // a null into a null-free array, take the slow path (which will throw NPE). 1986 Label skip; 1987 __ cmpptr(op->value()->as_register(), NULL_WORD); 1988 __ jcc(Assembler::notEqual, skip); 1989 if (UseArrayMarkWordCheck) { 1990 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry()); 1991 } else { 1992 __ testl(klass, Klass::_lh_null_free_array_bit_inplace); 1993 __ jcc(Assembler::notZero, *op->stub()->entry()); 1994 } 1995 __ bind(skip); 1996 } 1997 } 1998 1999 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) { 2000 // We are storing into an array that *may* be null-free (the declared type is 2001 // Object[], abstract[], interface[] or VT.ref[]). 2002 if (UseArrayMarkWordCheck) { 2003 Label test_mark_word; 2004 Register tmp = op->tmp()->as_register(); 2005 __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes())); 2006 __ testl(tmp, markWord::unlocked_value); 2007 __ jccb(Assembler::notZero, test_mark_word); 2008 __ load_prototype_header(tmp, op->array()->as_register(), rscratch1); 2009 __ bind(test_mark_word); 2010 __ testl(tmp, markWord::null_free_array_bit_in_place); 2011 } else { 2012 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 2013 Register klass = op->tmp()->as_register(); 2014 __ load_klass(klass, op->array()->as_register(), tmp_load_klass); 2015 __ movl(klass, Address(klass, Klass::layout_helper_offset())); 2016 __ testl(klass, Klass::_lh_null_free_array_bit_inplace); 2017 } 2018 } 2019 2020 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) { 2021 Label L_oops_equal; 2022 Label L_oops_not_equal; 2023 Label L_end; 2024 2025 Register left = op->left()->as_register(); 2026 Register right = op->right()->as_register(); 2027 2028 __ cmpptr(left, right); 2029 __ jcc(Assembler::equal, L_oops_equal); 2030 2031 // (1) Null check -- if one of the operands is null, the other must not be null (because 2032 // the two references are not equal), so they are not substitutable. 2033 // FIXME: do null check only if the operand is nullable 2034 __ testptr(left, right); 2035 __ jcc(Assembler::zero, L_oops_not_equal); 2036 2037 ciKlass* left_klass = op->left_klass(); 2038 ciKlass* right_klass = op->right_klass(); 2039 2040 // (2) Inline type check -- if either of the operands is not an inline type, 2041 // they are not substitutable. We do this only if we are not sure that the 2042 // operands are inline types. 2043 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
2044 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) { 2045 Register tmp1 = op->tmp1()->as_register(); 2046 __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern); 2047 __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes())); 2048 __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes())); 2049 __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern); 2050 __ jcc(Assembler::notEqual, L_oops_not_equal); 2051 } 2052 2053 // (3) Same klass check: if the operands are of different klasses, they are not substitutable. 2054 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) { 2055 // No need to load klass -- the operands are statically known to be the same inline klass. 2056 __ jmp(*op->stub()->entry()); 2057 } else { 2058 Register left_klass_op = op->left_klass_op()->as_register(); 2059 Register right_klass_op = op->right_klass_op()->as_register(); 2060 2061 if (UseCompressedClassPointers) { 2062 __ movl(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2063 __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2064 __ cmpl(left_klass_op, right_klass_op); 2065 } else { 2066 __ movptr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes())); 2067 __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes())); 2068 __ cmpptr(left_klass_op, right_klass_op); 2069 } 2070 2071 __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check 2072 // fall through to L_oops_not_equal 2073 } 2074 2075 __ bind(L_oops_not_equal); 2076 move(op->not_equal_result(), op->result_opr()); 2077 __ jmp(L_end); 2078 2079 __ bind(L_oops_equal); 2080 move(op->equal_result(), op->result_opr()); 2081 __ jmp(L_end); 2082 2083 // We've returned from the stub. RAX contains 0x0 IFF the two 2084 // operands are not substitutable. (Don't compare against 0x1 in case the 2085 // C compiler is naughty) 2086 __ bind(*op->stub()->continuation()); 2087 __ cmpl(rax, 0); 2088 __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal 2089 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal 2090 // fall-through 2091 __ bind(L_end); 2092 } 2093 2094 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2095 if (LP64_ONLY(false &&) op->code() == lir_cas_long) { 2096 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 2097 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 2098 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 2099 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 2100 Register addr = op->addr()->as_register(); 2101 __ lock(); 2102 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 2103 2104 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 2105 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 2106 Register addr = (op->addr()->is_single_cpu() ? 
op->addr()->as_register() : op->addr()->as_register_lo()); 2107 Register newval = op->new_value()->as_register(); 2108 Register cmpval = op->cmp_value()->as_register(); 2109 assert(cmpval == rax, "wrong register"); 2110 assert(newval != noreg, "new val must be register"); 2111 assert(cmpval != newval, "cmp and new values must be in different registers"); 2112 assert(cmpval != addr, "cmp and addr must be in different registers"); 2113 assert(newval != addr, "new value and addr must be in different registers"); 2114 2115 if ( op->code() == lir_cas_obj) { 2116 #ifdef _LP64 2117 if (UseCompressedOops) { 2118 __ encode_heap_oop(cmpval); 2119 __ mov(rscratch1, newval); 2120 __ encode_heap_oop(rscratch1); 2121 __ lock(); 2122 // cmpval (rax) is implicitly used by this instruction 2123 __ cmpxchgl(rscratch1, Address(addr, 0)); 2124 } else 2125 #endif 2126 { 2127 __ lock(); 2128 __ cmpxchgptr(newval, Address(addr, 0)); 2129 } 2130 } else { 2131 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 2132 __ lock(); 2133 __ cmpxchgl(newval, Address(addr, 0)); 2134 } 2135 #ifdef _LP64 2136 } else if (op->code() == lir_cas_long) { 2137 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2138 Register newval = op->new_value()->as_register_lo(); 2139 Register cmpval = op->cmp_value()->as_register_lo(); 2140 assert(cmpval == rax, "wrong register"); 2141 assert(newval != noreg, "new val must be register"); 2142 assert(cmpval != newval, "cmp and new values must be in different registers"); 2143 assert(cmpval != addr, "cmp and addr must be in different registers"); 2144 assert(newval != addr, "new value and addr must be in different registers"); 2145 __ lock(); 2146 __ cmpxchgq(newval, Address(addr, 0)); 2147 #endif // _LP64 2148 } else { 2149 Unimplemented(); 2150 } 2151 } 2152 2153 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) { 2154 assert(dst->is_cpu_register(), "must be"); 2155 assert(dst->type() == src->type(), "must be"); 2156 2157 if (src->is_cpu_register()) { 2158 reg2reg(src, dst); 2159 } else if (src->is_stack()) { 2160 stack2reg(src, dst, dst->type()); 2161 } else if (src->is_constant()) { 2162 const2reg(src, dst, lir_patch_none, nullptr); 2163 } else { 2164 ShouldNotReachHere(); 2165 } 2166 } 2167 2168 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 2169 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 2170 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86"); 2171 2172 Assembler::Condition acond, ncond; 2173 switch (condition) { 2174 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 2175 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 2176 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 2177 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 2178 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 2179 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 2180 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 2181 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 2182 default: acond = Assembler::equal; ncond = Assembler::notEqual; 2183 ShouldNotReachHere(); 2184 } 2185 2186 if (opr1->is_cpu_register()) { 2187 reg2reg(opr1, 
result); 2188 } else if (opr1->is_stack()) { 2189 stack2reg(opr1, result, result->type()); 2190 } else if (opr1->is_constant()) { 2191 const2reg(opr1, result, lir_patch_none, nullptr); 2192 } else { 2193 ShouldNotReachHere(); 2194 } 2195 2196 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 2197 // optimized version that does not require a branch 2198 if (opr2->is_single_cpu()) { 2199 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 2200 __ cmov(ncond, result->as_register(), opr2->as_register()); 2201 } else if (opr2->is_double_cpu()) { 2202 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2203 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2204 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 2205 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 2206 } else if (opr2->is_single_stack()) { 2207 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); 2208 } else if (opr2->is_double_stack()) { 2209 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 2210 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 2211 } else { 2212 ShouldNotReachHere(); 2213 } 2214 2215 } else { 2216 Label skip; 2217 __ jccb(acond, skip); 2218 if (opr2->is_cpu_register()) { 2219 reg2reg(opr2, result); 2220 } else if (opr2->is_stack()) { 2221 stack2reg(opr2, result, result->type()); 2222 } else if (opr2->is_constant()) { 2223 const2reg(opr2, result, lir_patch_none, nullptr); 2224 } else { 2225 ShouldNotReachHere(); 2226 } 2227 __ bind(skip); 2228 } 2229 } 2230 2231 2232 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 2233 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 2234 2235 if (left->is_single_cpu()) { 2236 assert(left == dest, "left and dest must be equal"); 2237 Register lreg = left->as_register(); 2238 2239 if (right->is_single_cpu()) { 2240 // cpu register - cpu register 2241 Register rreg = right->as_register(); 2242 switch (code) { 2243 case lir_add: __ addl (lreg, rreg); break; 2244 case lir_sub: __ subl (lreg, rreg); break; 2245 case lir_mul: __ imull(lreg, rreg); break; 2246 default: ShouldNotReachHere(); 2247 } 2248 2249 } else if (right->is_stack()) { 2250 // cpu register - stack 2251 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2252 switch (code) { 2253 case lir_add: __ addl(lreg, raddr); break; 2254 case lir_sub: __ subl(lreg, raddr); break; 2255 default: ShouldNotReachHere(); 2256 } 2257 2258 } else if (right->is_constant()) { 2259 // cpu register - constant 2260 jint c = right->as_constant_ptr()->as_jint(); 2261 switch (code) { 2262 case lir_add: { 2263 __ incrementl(lreg, c); 2264 break; 2265 } 2266 case lir_sub: { 2267 __ decrementl(lreg, c); 2268 break; 2269 } 2270 default: ShouldNotReachHere(); 2271 } 2272 2273 } else { 2274 ShouldNotReachHere(); 2275 } 2276 2277 } else if (left->is_double_cpu()) { 2278 assert(left == dest, "left and dest must be equal"); 2279 Register lreg_lo = left->as_register_lo(); 2280 Register lreg_hi = 
left->as_register_hi(); 2281 2282 if (right->is_double_cpu()) { 2283 // cpu register - cpu register 2284 Register rreg_lo = right->as_register_lo(); 2285 Register rreg_hi = right->as_register_hi(); 2286 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 2287 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 2288 switch (code) { 2289 case lir_add: 2290 __ addptr(lreg_lo, rreg_lo); 2291 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); 2292 break; 2293 case lir_sub: 2294 __ subptr(lreg_lo, rreg_lo); 2295 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 2296 break; 2297 case lir_mul: 2298 #ifdef _LP64 2299 __ imulq(lreg_lo, rreg_lo); 2300 #else 2301 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 2302 __ imull(lreg_hi, rreg_lo); 2303 __ imull(rreg_hi, lreg_lo); 2304 __ addl (rreg_hi, lreg_hi); 2305 __ mull (rreg_lo); 2306 __ addl (lreg_hi, rreg_hi); 2307 #endif // _LP64 2308 break; 2309 default: 2310 ShouldNotReachHere(); 2311 } 2312 2313 } else if (right->is_constant()) { 2314 // cpu register - constant 2315 #ifdef _LP64 2316 jlong c = right->as_constant_ptr()->as_jlong_bits(); 2317 __ movptr(r10, (intptr_t) c); 2318 switch (code) { 2319 case lir_add: 2320 __ addptr(lreg_lo, r10); 2321 break; 2322 case lir_sub: 2323 __ subptr(lreg_lo, r10); 2324 break; 2325 default: 2326 ShouldNotReachHere(); 2327 } 2328 #else 2329 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 2330 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 2331 switch (code) { 2332 case lir_add: 2333 __ addptr(lreg_lo, c_lo); 2334 __ adcl(lreg_hi, c_hi); 2335 break; 2336 case lir_sub: 2337 __ subptr(lreg_lo, c_lo); 2338 __ sbbl(lreg_hi, c_hi); 2339 break; 2340 default: 2341 ShouldNotReachHere(); 2342 } 2343 #endif // _LP64 2344 2345 } else { 2346 ShouldNotReachHere(); 2347 } 2348 2349 } else if (left->is_single_xmm()) { 2350 assert(left == dest, "left and dest must be equal"); 2351 XMMRegister lreg = left->as_xmm_float_reg(); 2352 2353 if (right->is_single_xmm()) { 2354 XMMRegister rreg = right->as_xmm_float_reg(); 2355 switch (code) { 2356 case lir_add: __ addss(lreg, rreg); break; 2357 case lir_sub: __ subss(lreg, rreg); break; 2358 case lir_mul: __ mulss(lreg, rreg); break; 2359 case lir_div: __ divss(lreg, rreg); break; 2360 default: ShouldNotReachHere(); 2361 } 2362 } else { 2363 Address raddr; 2364 if (right->is_single_stack()) { 2365 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2366 } else if (right->is_constant()) { 2367 // hack for now 2368 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 2369 } else { 2370 ShouldNotReachHere(); 2371 } 2372 switch (code) { 2373 case lir_add: __ addss(lreg, raddr); break; 2374 case lir_sub: __ subss(lreg, raddr); break; 2375 case lir_mul: __ mulss(lreg, raddr); break; 2376 case lir_div: __ divss(lreg, raddr); break; 2377 default: ShouldNotReachHere(); 2378 } 2379 } 2380 2381 } else if (left->is_double_xmm()) { 2382 assert(left == dest, "left and dest must be equal"); 2383 2384 XMMRegister lreg = left->as_xmm_double_reg(); 2385 if (right->is_double_xmm()) { 2386 XMMRegister rreg = right->as_xmm_double_reg(); 2387 switch (code) { 2388 case lir_add: __ addsd(lreg, rreg); break; 2389 case lir_sub: __ subsd(lreg, rreg); break; 2390 case lir_mul: __ mulsd(lreg, rreg); break; 2391 case lir_div: __ divsd(lreg, rreg); break; 2392 default: ShouldNotReachHere(); 2393 } 2394 } else { 2395 Address raddr; 2396 if (right->is_double_stack()) { 2397 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2398 } else if 
(right->is_constant()) { 2399 // hack for now 2400 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2401 } else { 2402 ShouldNotReachHere(); 2403 } 2404 switch (code) { 2405 case lir_add: __ addsd(lreg, raddr); break; 2406 case lir_sub: __ subsd(lreg, raddr); break; 2407 case lir_mul: __ mulsd(lreg, raddr); break; 2408 case lir_div: __ divsd(lreg, raddr); break; 2409 default: ShouldNotReachHere(); 2410 } 2411 } 2412 2413 #ifndef _LP64 2414 } else if (left->is_single_fpu()) { 2415 assert(dest->is_single_fpu(), "fpu stack allocation required"); 2416 2417 if (right->is_single_fpu()) { 2418 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); 2419 2420 } else { 2421 assert(left->fpu_regnr() == 0, "left must be on TOS"); 2422 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); 2423 2424 Address raddr; 2425 if (right->is_single_stack()) { 2426 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2427 } else if (right->is_constant()) { 2428 address const_addr = float_constant(right->as_jfloat()); 2429 assert(const_addr != nullptr, "incorrect float/double constant maintenance"); 2430 // hack for now 2431 raddr = __ as_Address(InternalAddress(const_addr)); 2432 } else { 2433 ShouldNotReachHere(); 2434 } 2435 2436 switch (code) { 2437 case lir_add: __ fadd_s(raddr); break; 2438 case lir_sub: __ fsub_s(raddr); break; 2439 case lir_mul: __ fmul_s(raddr); break; 2440 case lir_div: __ fdiv_s(raddr); break; 2441 default: ShouldNotReachHere(); 2442 } 2443 } 2444 2445 } else if (left->is_double_fpu()) { 2446 assert(dest->is_double_fpu(), "fpu stack allocation required"); 2447 2448 if (code == lir_mul || code == lir_div) { 2449 // Double values require special handling for strictfp mul/div on x86 2450 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias1())); 2451 __ fmulp(left->fpu_regnrLo() + 1); 2452 } 2453 2454 if (right->is_double_fpu()) { 2455 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); 2456 2457 } else { 2458 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); 2459 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); 2460 2461 Address raddr; 2462 if (right->is_double_stack()) { 2463 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2464 } else if (right->is_constant()) { 2465 // hack for now 2466 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2467 } else { 2468 ShouldNotReachHere(); 2469 } 2470 2471 switch (code) { 2472 case lir_add: __ fadd_d(raddr); break; 2473 case lir_sub: __ fsub_d(raddr); break; 2474 case lir_mul: __ fmul_d(raddr); break; 2475 case lir_div: __ fdiv_d(raddr); break; 2476 default: ShouldNotReachHere(); 2477 } 2478 } 2479 2480 if (code == lir_mul || code == lir_div) { 2481 // Double values require special handling for strictfp mul/div on x86 2482 __ fld_x(ExternalAddress(StubRoutines::x86::addr_fpu_subnormal_bias2())); 2483 __ fmulp(dest->fpu_regnrLo() + 1); 2484 } 2485 #endif // !_LP64 2486 2487 } else if (left->is_single_stack() || left->is_address()) { 2488 assert(left == dest, "left and dest must be equal"); 2489 2490 Address laddr; 2491 if (left->is_single_stack()) { 2492 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2493 } else if (left->is_address()) { 2494 laddr = as_Address(left->as_address_ptr()); 2495 } else { 2496 ShouldNotReachHere(); 2497 } 2498 2499 if (right->is_single_cpu()) { 2500 Register rreg = 
right->as_register(); 2501 switch (code) { 2502 case lir_add: __ addl(laddr, rreg); break; 2503 case lir_sub: __ subl(laddr, rreg); break; 2504 default: ShouldNotReachHere(); 2505 } 2506 } else if (right->is_constant()) { 2507 jint c = right->as_constant_ptr()->as_jint(); 2508 switch (code) { 2509 case lir_add: { 2510 __ incrementl(laddr, c); 2511 break; 2512 } 2513 case lir_sub: { 2514 __ decrementl(laddr, c); 2515 break; 2516 } 2517 default: ShouldNotReachHere(); 2518 } 2519 } else { 2520 ShouldNotReachHere(); 2521 } 2522 2523 } else { 2524 ShouldNotReachHere(); 2525 } 2526 } 2527 2528 #ifndef _LP64 2529 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { 2530 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); 2531 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); 2532 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); 2533 2534 bool left_is_tos = (left_index == 0); 2535 bool dest_is_tos = (dest_index == 0); 2536 int non_tos_index = (left_is_tos ? right_index : left_index); 2537 2538 switch (code) { 2539 case lir_add: 2540 if (pop_fpu_stack) __ faddp(non_tos_index); 2541 else if (dest_is_tos) __ fadd (non_tos_index); 2542 else __ fadda(non_tos_index); 2543 break; 2544 2545 case lir_sub: 2546 if (left_is_tos) { 2547 if (pop_fpu_stack) __ fsubrp(non_tos_index); 2548 else if (dest_is_tos) __ fsub (non_tos_index); 2549 else __ fsubra(non_tos_index); 2550 } else { 2551 if (pop_fpu_stack) __ fsubp (non_tos_index); 2552 else if (dest_is_tos) __ fsubr (non_tos_index); 2553 else __ fsuba (non_tos_index); 2554 } 2555 break; 2556 2557 case lir_mul: 2558 if (pop_fpu_stack) __ fmulp(non_tos_index); 2559 else if (dest_is_tos) __ fmul (non_tos_index); 2560 else __ fmula(non_tos_index); 2561 break; 2562 2563 case lir_div: 2564 if (left_is_tos) { 2565 if (pop_fpu_stack) __ fdivrp(non_tos_index); 2566 else if (dest_is_tos) __ fdiv (non_tos_index); 2567 else __ fdivra(non_tos_index); 2568 } else { 2569 if (pop_fpu_stack) __ fdivp (non_tos_index); 2570 else if (dest_is_tos) __ fdivr (non_tos_index); 2571 else __ fdiva (non_tos_index); 2572 } 2573 break; 2574 2575 case lir_rem: 2576 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); 2577 __ fremr(noreg); 2578 break; 2579 2580 default: 2581 ShouldNotReachHere(); 2582 } 2583 } 2584 #endif // _LP64 2585 2586 2587 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) { 2588 if (value->is_double_xmm()) { 2589 switch(code) { 2590 case lir_abs : 2591 { 2592 #ifdef _LP64 2593 if (UseAVX > 2 && !VM_Version::supports_avx512vl()) { 2594 assert(tmp->is_valid(), "need temporary"); 2595 __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2); 2596 } else 2597 #endif 2598 { 2599 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2600 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2601 } 2602 assert(!tmp->is_valid(), "do not need temporary"); 2603 __ andpd(dest->as_xmm_double_reg(), 2604 ExternalAddress((address)double_signmask_pool), 2605 rscratch1); 2606 } 2607 } 2608 break; 2609 2610 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2611 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2612 default : ShouldNotReachHere(); 2613 } 
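// The x87 fallback below is compiled only on 32-bit builds; on LP64 every float/double abs and sqrt is handled by the SSE/AVX code above.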
2614 2615 #ifndef _LP64 2616 } else if (value->is_double_fpu()) { 2617 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); 2618 switch(code) { 2619 case lir_abs : __ fabs() ; break; 2620 case lir_sqrt : __ fsqrt(); break; 2621 default : ShouldNotReachHere(); 2622 } 2623 #endif // !_LP64 2624 } else if (code == lir_f2hf) { 2625 __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 2626 } else if (code == lir_hf2f) { 2627 __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register()); 2628 } else { 2629 Unimplemented(); 2630 } 2631 } 2632 2633 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2634 // assert(left->destroys_register(), "check"); 2635 if (left->is_single_cpu()) { 2636 Register reg = left->as_register(); 2637 if (right->is_constant()) { 2638 int val = right->as_constant_ptr()->as_jint(); 2639 switch (code) { 2640 case lir_logic_and: __ andl (reg, val); break; 2641 case lir_logic_or: __ orl (reg, val); break; 2642 case lir_logic_xor: __ xorl (reg, val); break; 2643 default: ShouldNotReachHere(); 2644 } 2645 } else if (right->is_stack()) { 2646 // added support for stack operands 2647 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2648 switch (code) { 2649 case lir_logic_and: __ andl (reg, raddr); break; 2650 case lir_logic_or: __ orl (reg, raddr); break; 2651 case lir_logic_xor: __ xorl (reg, raddr); break; 2652 default: ShouldNotReachHere(); 2653 } 2654 } else { 2655 Register rright = right->as_register(); 2656 switch (code) { 2657 case lir_logic_and: __ andptr (reg, rright); break; 2658 case lir_logic_or : __ orptr (reg, rright); break; 2659 case lir_logic_xor: __ xorptr (reg, rright); break; 2660 default: ShouldNotReachHere(); 2661 } 2662 } 2663 move_regs(reg, dst->as_register()); 2664 } else { 2665 Register l_lo = left->as_register_lo(); 2666 Register l_hi = left->as_register_hi(); 2667 if (right->is_constant()) { 2668 #ifdef _LP64 2669 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2670 switch (code) { 2671 case lir_logic_and: 2672 __ andq(l_lo, rscratch1); 2673 break; 2674 case lir_logic_or: 2675 __ orq(l_lo, rscratch1); 2676 break; 2677 case lir_logic_xor: 2678 __ xorq(l_lo, rscratch1); 2679 break; 2680 default: ShouldNotReachHere(); 2681 } 2682 #else 2683 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2684 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2685 switch (code) { 2686 case lir_logic_and: 2687 __ andl(l_lo, r_lo); 2688 __ andl(l_hi, r_hi); 2689 break; 2690 case lir_logic_or: 2691 __ orl(l_lo, r_lo); 2692 __ orl(l_hi, r_hi); 2693 break; 2694 case lir_logic_xor: 2695 __ xorl(l_lo, r_lo); 2696 __ xorl(l_hi, r_hi); 2697 break; 2698 default: ShouldNotReachHere(); 2699 } 2700 #endif // _LP64 2701 } else { 2702 #ifdef _LP64 2703 Register r_lo; 2704 if (is_reference_type(right->type())) { 2705 r_lo = right->as_register(); 2706 } else { 2707 r_lo = right->as_register_lo(); 2708 } 2709 #else 2710 Register r_lo = right->as_register_lo(); 2711 Register r_hi = right->as_register_hi(); 2712 assert(l_lo != r_hi, "overwriting registers"); 2713 #endif 2714 switch (code) { 2715 case lir_logic_and: 2716 __ andptr(l_lo, r_lo); 2717 NOT_LP64(__ andptr(l_hi, r_hi);) 2718 break; 2719 case lir_logic_or: 2720 __ orptr(l_lo, r_lo); 2721 NOT_LP64(__ orptr(l_hi, r_hi);) 2722 break; 2723 case lir_logic_xor: 2724 __ xorptr(l_lo, r_lo); 2725 NOT_LP64(__ xorptr(l_hi, r_hi);) 2726 break; 2727 default: ShouldNotReachHere(); 2728 } 2729 } 2730 
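// Write the result back into the destination pair; on 32-bit the move order below is chosen so that neither half of the result is overwritten before it has been read.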
2731 Register dst_lo = dst->as_register_lo(); 2732 Register dst_hi = dst->as_register_hi(); 2733 2734 #ifdef _LP64 2735 move_regs(l_lo, dst_lo); 2736 #else 2737 if (dst_lo == l_hi) { 2738 assert(dst_hi != l_lo, "overwriting registers"); 2739 move_regs(l_hi, dst_hi); 2740 move_regs(l_lo, dst_lo); 2741 } else { 2742 assert(dst_lo != l_hi, "overwriting registers"); 2743 move_regs(l_lo, dst_lo); 2744 move_regs(l_hi, dst_hi); 2745 } 2746 #endif // _LP64 2747 } 2748 } 2749 2750 2751 // we assume that rax, and rdx can be overwritten 2752 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2753 2754 assert(left->is_single_cpu(), "left must be register"); 2755 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2756 assert(result->is_single_cpu(), "result must be register"); 2757 2758 // assert(left->destroys_register(), "check"); 2759 // assert(right->destroys_register(), "check"); 2760 2761 Register lreg = left->as_register(); 2762 Register dreg = result->as_register(); 2763 2764 if (right->is_constant()) { 2765 jint divisor = right->as_constant_ptr()->as_jint(); 2766 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2767 if (code == lir_idiv) { 2768 assert(lreg == rax, "must be rax,"); 2769 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2770 __ cdql(); // sign extend into rdx:rax 2771 if (divisor == 2) { 2772 __ subl(lreg, rdx); 2773 } else { 2774 __ andl(rdx, divisor - 1); 2775 __ addl(lreg, rdx); 2776 } 2777 __ sarl(lreg, log2i_exact(divisor)); 2778 move_regs(lreg, dreg); 2779 } else if (code == lir_irem) { 2780 Label done; 2781 __ mov(dreg, lreg); 2782 __ andl(dreg, 0x80000000 | (divisor - 1)); 2783 __ jcc(Assembler::positive, done); 2784 __ decrement(dreg); 2785 __ orl(dreg, ~(divisor - 1)); 2786 __ increment(dreg); 2787 __ bind(done); 2788 } else { 2789 ShouldNotReachHere(); 2790 } 2791 } else { 2792 Register rreg = right->as_register(); 2793 assert(lreg == rax, "left register must be rax,"); 2794 assert(rreg != rdx, "right register must not be rdx"); 2795 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2796 2797 move_regs(lreg, rax); 2798 2799 int idivl_offset = __ corrected_idivl(rreg); 2800 if (ImplicitDiv0Checks) { 2801 add_debug_info_for_div0(idivl_offset, info); 2802 } 2803 if (code == lir_irem) { 2804 move_regs(rdx, dreg); // result is in rdx 2805 } else { 2806 move_regs(rax, dreg); 2807 } 2808 } 2809 } 2810 2811 2812 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2813 if (opr1->is_single_cpu()) { 2814 Register reg1 = opr1->as_register(); 2815 if (opr2->is_single_cpu()) { 2816 // cpu register - cpu register 2817 if (is_reference_type(opr1->type())) { 2818 __ cmpoop(reg1, opr2->as_register()); 2819 } else { 2820 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 2821 __ cmpl(reg1, opr2->as_register()); 2822 } 2823 } else if (opr2->is_stack()) { 2824 // cpu register - stack 2825 if (is_reference_type(opr1->type())) { 2826 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2827 } else { 2828 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2829 } 2830 } else if (opr2->is_constant()) { 2831 // cpu register - constant 2832 LIR_Const* c = opr2->as_constant_ptr(); 2833 if (c->type() == T_INT) { 2834 jint i = c->as_jint(); 2835 if (i == 0) { 2836 __ testl(reg1, reg1); 2837 } else { 2838 __ cmpl(reg1, i); 2839 } 2840 } else if 
(c->type() == T_METADATA) { 2841 // All we need for now is a comparison with null for equality. 2842 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 2843 Metadata* m = c->as_metadata(); 2844 if (m == nullptr) { 2845 __ testptr(reg1, reg1); 2846 } else { 2847 ShouldNotReachHere(); 2848 } 2849 } else if (is_reference_type(c->type())) { 2850 // In 64bit oops are single register 2851 jobject o = c->as_jobject(); 2852 if (o == nullptr) { 2853 __ testptr(reg1, reg1); 2854 } else { 2855 __ cmpoop(reg1, o, rscratch1); 2856 } 2857 } else { 2858 fatal("unexpected type: %s", basictype_to_str(c->type())); 2859 } 2860 // cpu register - address 2861 } else if (opr2->is_address()) { 2862 if (op->info() != nullptr) { 2863 add_debug_info_for_null_check_here(op->info()); 2864 } 2865 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2866 } else { 2867 ShouldNotReachHere(); 2868 } 2869 2870 } else if(opr1->is_double_cpu()) { 2871 Register xlo = opr1->as_register_lo(); 2872 Register xhi = opr1->as_register_hi(); 2873 if (opr2->is_double_cpu()) { 2874 #ifdef _LP64 2875 __ cmpptr(xlo, opr2->as_register_lo()); 2876 #else 2877 // cpu register - cpu register 2878 Register ylo = opr2->as_register_lo(); 2879 Register yhi = opr2->as_register_hi(); 2880 __ subl(xlo, ylo); 2881 __ sbbl(xhi, yhi); 2882 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2883 __ orl(xhi, xlo); 2884 } 2885 #endif // _LP64 2886 } else if (opr2->is_constant()) { 2887 // cpu register - constant 0 2888 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2889 #ifdef _LP64 2890 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2891 #else 2892 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2893 __ orl(xhi, xlo); 2894 #endif // _LP64 2895 } else { 2896 ShouldNotReachHere(); 2897 } 2898 2899 } else if (opr1->is_single_xmm()) { 2900 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2901 if (opr2->is_single_xmm()) { 2902 // xmm register - xmm register 2903 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2904 } else if (opr2->is_stack()) { 2905 // xmm register - stack 2906 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2907 } else if (opr2->is_constant()) { 2908 // xmm register - constant 2909 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2910 } else if (opr2->is_address()) { 2911 // xmm register - address 2912 if (op->info() != nullptr) { 2913 add_debug_info_for_null_check_here(op->info()); 2914 } 2915 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2916 } else { 2917 ShouldNotReachHere(); 2918 } 2919 2920 } else if (opr1->is_double_xmm()) { 2921 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2922 if (opr2->is_double_xmm()) { 2923 // xmm register - xmm register 2924 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2925 } else if (opr2->is_stack()) { 2926 // xmm register - stack 2927 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2928 } else if (opr2->is_constant()) { 2929 // xmm register - constant 2930 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2931 } else if (opr2->is_address()) { 2932 // xmm register - address 2933 if (op->info() != nullptr) { 2934 add_debug_info_for_null_check_here(op->info()); 2935 } 2936 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2937 } else { 2938 ShouldNotReachHere(); 2939 } 2940 2941 #ifndef _LP64 2942 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { 2943 assert(opr1->is_fpu_register() && opr1->fpu() 
== 0, "currently left-hand side must be on TOS (relax this restriction)"); 2944 assert(opr2->is_fpu_register(), "both must be registers"); 2945 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2946 #endif // LP64 2947 2948 } else if (opr1->is_address() && opr2->is_constant()) { 2949 LIR_Const* c = opr2->as_constant_ptr(); 2950 #ifdef _LP64 2951 if (is_reference_type(c->type())) { 2952 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2953 __ movoop(rscratch1, c->as_jobject()); 2954 } 2955 #endif // LP64 2956 if (op->info() != nullptr) { 2957 add_debug_info_for_null_check_here(op->info()); 2958 } 2959 // special case: address - constant 2960 LIR_Address* addr = opr1->as_address_ptr(); 2961 if (c->type() == T_INT) { 2962 __ cmpl(as_Address(addr), c->as_jint()); 2963 } else if (is_reference_type(c->type())) { 2964 #ifdef _LP64 2965 // %%% Make this explode if addr isn't reachable until we figure out a 2966 // better strategy by giving noreg as the temp for as_Address 2967 __ cmpoop(rscratch1, as_Address(addr, noreg)); 2968 #else 2969 __ cmpoop(as_Address(addr), c->as_jobject()); 2970 #endif // _LP64 2971 } else { 2972 ShouldNotReachHere(); 2973 } 2974 2975 } else { 2976 ShouldNotReachHere(); 2977 } 2978 } 2979 2980 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2981 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2982 if (left->is_single_xmm()) { 2983 assert(right->is_single_xmm(), "must match"); 2984 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2985 } else if (left->is_double_xmm()) { 2986 assert(right->is_double_xmm(), "must match"); 2987 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2988 2989 } else { 2990 #ifdef _LP64 2991 ShouldNotReachHere(); 2992 #else 2993 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); 2994 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); 2995 2996 assert(left->fpu() == 0, "left must be on TOS"); 2997 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), 2998 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2999 #endif // LP64 3000 } 3001 } else { 3002 assert(code == lir_cmp_l2i, "check"); 3003 #ifdef _LP64 3004 Label done; 3005 Register dest = dst->as_register(); 3006 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 3007 __ movl(dest, -1); 3008 __ jccb(Assembler::less, done); 3009 __ setb(Assembler::notZero, dest); 3010 __ movzbl(dest, dest); 3011 __ bind(done); 3012 #else 3013 __ lcmp2int(left->as_register_hi(), 3014 left->as_register_lo(), 3015 right->as_register_hi(), 3016 right->as_register_lo()); 3017 move_regs(left->as_register_hi(), dst->as_register()); 3018 #endif // _LP64 3019 } 3020 } 3021 3022 3023 void LIR_Assembler::align_call(LIR_Code code) { 3024 // make sure that the displacement word of the call ends up word aligned 3025 int offset = __ offset(); 3026 switch (code) { 3027 case lir_static_call: 3028 case lir_optvirtual_call: 3029 case lir_dynamic_call: 3030 offset += NativeCall::displacement_offset; 3031 break; 3032 case lir_icvirtual_call: 3033 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size; 3034 break; 3035 default: ShouldNotReachHere(); 3036 } 3037 __ align(BytesPerWord, offset); 3038 } 3039 3040 3041 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 3042 assert((__ 
offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 3043 "must be aligned"); 3044 __ call(AddressLiteral(op->addr(), rtype)); 3045 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3046 __ post_call_nop(); 3047 } 3048 3049 3050 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 3051 __ ic_call(op->addr()); 3052 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields()); 3053 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0, 3054 "must be aligned"); 3055 __ post_call_nop(); 3056 } 3057 3058 3059 void LIR_Assembler::emit_static_call_stub() { 3060 address call_pc = __ pc(); 3061 address stub = __ start_a_stub(call_stub_size()); 3062 if (stub == nullptr) { 3063 bailout("static call stub overflow"); 3064 return; 3065 } 3066 3067 int start = __ offset(); 3068 3069 // make sure that the displacement word of the call ends up word aligned 3070 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset); 3071 __ relocate(static_stub_Relocation::spec(call_pc)); 3072 __ mov_metadata(rbx, (Metadata*)nullptr); 3073 // must be set to -1 at code generation time 3074 assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned"); 3075 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 3076 __ jump(RuntimeAddress(__ pc())); 3077 3078 assert(__ offset() - start <= call_stub_size(), "stub too big"); 3079 __ end_a_stub(); 3080 } 3081 3082 3083 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 3084 assert(exceptionOop->as_register() == rax, "must match"); 3085 assert(exceptionPC->as_register() == rdx, "must match"); 3086 3087 // exception object is not added to oop map by LinearScan 3088 // (LinearScan assumes that no oops are in fixed registers) 3089 info->add_register_oop(exceptionOop); 3090 Runtime1::StubID unwind_id; 3091 3092 // get current pc information 3093 // pc is only needed if the method has an exception handler, the unwind code does not need it. 
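// Materialize the throw-site pc into exceptionPC (rdx) with lea, and record its offset via add_call_info so the debug info maps this pc to the handler search.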
3094 int pc_for_athrow_offset = __ offset(); 3095 InternalAddress pc_for_athrow(__ pc()); 3096 __ lea(exceptionPC->as_register(), pc_for_athrow); 3097 add_call_info(pc_for_athrow_offset, info); // for exception handler 3098 3099 __ verify_not_null_oop(rax); 3100 // search an exception handler (rax: exception oop, rdx: throwing pc) 3101 if (compilation()->has_fpu_code()) { 3102 unwind_id = Runtime1::handle_exception_id; 3103 } else { 3104 unwind_id = Runtime1::handle_exception_nofpu_id; 3105 } 3106 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 3107 3108 // enough room for two byte trap 3109 __ nop(); 3110 } 3111 3112 3113 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 3114 assert(exceptionOop->as_register() == rax, "must match"); 3115 3116 __ jmp(_unwind_handler_entry); 3117 } 3118 3119 3120 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 3121 3122 // optimized version for linear scan: 3123 // * count must be already in ECX (guaranteed by LinearScan) 3124 // * left and dest must be equal 3125 // * tmp must be unused 3126 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 3127 assert(left == dest, "left and dest must be equal"); 3128 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 3129 3130 if (left->is_single_cpu()) { 3131 Register value = left->as_register(); 3132 assert(value != SHIFT_count, "left cannot be ECX"); 3133 3134 switch (code) { 3135 case lir_shl: __ shll(value); break; 3136 case lir_shr: __ sarl(value); break; 3137 case lir_ushr: __ shrl(value); break; 3138 default: ShouldNotReachHere(); 3139 } 3140 } else if (left->is_double_cpu()) { 3141 Register lo = left->as_register_lo(); 3142 Register hi = left->as_register_hi(); 3143 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 3144 #ifdef _LP64 3145 switch (code) { 3146 case lir_shl: __ shlptr(lo); break; 3147 case lir_shr: __ sarptr(lo); break; 3148 case lir_ushr: __ shrptr(lo); break; 3149 default: ShouldNotReachHere(); 3150 } 3151 #else 3152 3153 switch (code) { 3154 case lir_shl: __ lshl(hi, lo); break; 3155 case lir_shr: __ lshr(hi, lo, true); break; 3156 case lir_ushr: __ lshr(hi, lo, false); break; 3157 default: ShouldNotReachHere(); 3158 } 3159 #endif // LP64 3160 } else { 3161 ShouldNotReachHere(); 3162 } 3163 } 3164 3165 3166 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 3167 if (dest->is_single_cpu()) { 3168 // first move left into dest so that left is not destroyed by the shift 3169 Register value = dest->as_register(); 3170 count = count & 0x1F; // Java spec 3171 3172 move_regs(left->as_register(), value); 3173 switch (code) { 3174 case lir_shl: __ shll(value, count); break; 3175 case lir_shr: __ sarl(value, count); break; 3176 case lir_ushr: __ shrl(value, count); break; 3177 default: ShouldNotReachHere(); 3178 } 3179 } else if (dest->is_double_cpu()) { 3180 #ifndef _LP64 3181 Unimplemented(); 3182 #else 3183 // first move left into dest so that left is not destroyed by the shift 3184 Register value = dest->as_register_lo(); 3185 count = count & 0x1F; // Java spec 3186 3187 move_regs(left->as_register_lo(), value); 3188 switch (code) { 3189 case lir_shl: __ shlptr(value, count); break; 3190 case lir_shr: __ sarptr(value, count); break; 3191 case lir_ushr: __ shrptr(value, count); break; 3192 default: ShouldNotReachHere(); 3193 } 3194 #endif // _LP64 3195 } else { 3196 ShouldNotReachHere(); 3197 } 3198 } 3199 3200 3201 void 
LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 3202 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3203 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3204 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3205 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); 3206 } 3207 3208 3209 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 3210 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3211 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3212 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3213 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); 3214 } 3215 3216 3217 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 3218 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3219 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3220 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3221 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1); 3222 } 3223 3224 3225 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { 3226 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 3227 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 3228 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 3229 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1); 3230 } 3231 3232 3233 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) { 3234 if (null_check) { 3235 __ testptr(obj, obj); 3236 __ jcc(Assembler::zero, *slow_path->entry()); 3237 } 3238 if (UseArrayMarkWordCheck) { 3239 if (is_dest) { 3240 __ test_null_free_array_oop(obj, tmp, *slow_path->entry()); 3241 } else { 3242 __ test_flat_array_oop(obj, tmp, *slow_path->entry()); 3243 } 3244 } else { 3245 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3246 __ load_klass(tmp, obj, tmp_load_klass); 3247 __ movl(tmp, Address(tmp, Klass::layout_helper_offset())); 3248 if (is_dest) { 3249 // Take the slow path if it's a null_free destination array, in case the source array contains nullptrs. 3250 __ testl(tmp, Klass::_lh_null_free_array_bit_inplace); 3251 } else { 3252 __ testl(tmp, Klass::_lh_array_tag_flat_value_bit_inplace); 3253 } 3254 __ jcc(Assembler::notZero, *slow_path->entry()); 3255 } 3256 } 3257 3258 3259 // This code replaces a call to arraycopy; no exceptions may 3260 // be thrown in this code; they must be thrown in the System.arraycopy 3261 // activation frame; we could save some checks if this were not the case 3262 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 3263 ciArrayKlass* default_type = op->expected_type(); 3264 Register src = op->src()->as_register(); 3265 Register dst = op->dst()->as_register(); 3266 Register src_pos = op->src_pos()->as_register(); 3267 Register dst_pos = op->dst_pos()->as_register(); 3268 Register length = op->length()->as_register(); 3269 Register tmp = op->tmp()->as_register(); 3270 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3271 3272 CodeStub* stub = op->stub(); 3273 int flags = op->flags(); 3274 BasicType basic_type = default_type != nullptr ?
default_type->element_type()->basic_type() : T_ILLEGAL; 3275 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 3276 3277 if (flags & LIR_OpArrayCopy::always_slow_path) { 3278 __ jmp(*stub->entry()); 3279 __ bind(*stub->continuation()); 3280 return; 3281 } 3282 3283 // if we don't know anything, just go through the generic arraycopy 3284 if (default_type == nullptr) { 3285 // save outgoing arguments on stack in case call to System.arraycopy is needed 3286 // HACK ALERT. This code used to push the parameters in a hardwired fashion 3287 // for interpreter calling conventions. Now we have to do it in new style conventions. 3288 // For the moment until C1 gets the new register allocator I just force all the 3289 // args to the right place (except the register args) and then on the back side 3290 // reload the register args properly if we go slow path. Yuck 3291 3292 // These are proper for the calling convention 3293 store_parameter(length, 2); 3294 store_parameter(dst_pos, 1); 3295 store_parameter(dst, 0); 3296 3297 // these are just temporary placements until we need to reload 3298 store_parameter(src_pos, 3); 3299 store_parameter(src, 4); 3300 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) 3301 3302 address copyfunc_addr = StubRoutines::generic_arraycopy(); 3303 assert(copyfunc_addr != nullptr, "generic arraycopy stub required"); 3304 3305 // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint 3306 #ifdef _LP64 3307 // The arguments are in java calling convention so we can trivially shift them to C 3308 // convention 3309 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4); 3310 __ mov(c_rarg0, j_rarg0); 3311 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4); 3312 __ mov(c_rarg1, j_rarg1); 3313 assert_different_registers(c_rarg2, j_rarg3, j_rarg4); 3314 __ mov(c_rarg2, j_rarg2); 3315 assert_different_registers(c_rarg3, j_rarg4); 3316 __ mov(c_rarg3, j_rarg3); 3317 #ifdef _WIN64 3318 // Allocate abi space for args but be sure to keep stack aligned 3319 __ subptr(rsp, 6*wordSize); 3320 store_parameter(j_rarg4, 4); 3321 #ifndef PRODUCT 3322 if (PrintC1Statistics) { 3323 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3324 } 3325 #endif 3326 __ call(RuntimeAddress(copyfunc_addr)); 3327 __ addptr(rsp, 6*wordSize); 3328 #else 3329 __ mov(c_rarg4, j_rarg4); 3330 #ifndef PRODUCT 3331 if (PrintC1Statistics) { 3332 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3333 } 3334 #endif 3335 __ call(RuntimeAddress(copyfunc_addr)); 3336 #endif // _WIN64 3337 #else 3338 __ push(length); 3339 __ push(dst_pos); 3340 __ push(dst); 3341 __ push(src_pos); 3342 __ push(src); 3343 3344 #ifndef PRODUCT 3345 if (PrintC1Statistics) { 3346 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1); 3347 } 3348 #endif 3349 __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack 3350 3351 #endif // _LP64 3352 3353 __ testl(rax, rax); 3354 __ jcc(Assembler::equal, *stub->continuation()); 3355 3356 __ mov(tmp, rax); 3357 __ xorl(tmp, -1); 3358 3359 // Reload values from the stack so they are where the stub 3360 // expects them.
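// The generic stub returns 0 on success, otherwise the bitwise complement of the number of elements already copied; tmp (= ~rax from the xorl above) therefore holds that count, used below to advance the positions and shrink the length before retrying through the slow-path stub.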
    __ movptr(dst, Address(rsp, 0*BytesPerWord));
    __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr(length, Address(rsp, 2*BytesPerWord));
    __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr(src, Address(rsp, 4*BytesPerWord));

    __ subl(length, tmp);
    __ addl(src_pos, tmp);
    __ addl(dst_pos, tmp);
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  // Handle inline type arrays
  if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
    arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
  }
  if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
    arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
  }

  assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and positions are all sign-extended at this point on 64-bit

  // test for null
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src, tmp_load_klass);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ testl(src, src);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != nullptr) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of the two arrays is not statically known to be an object array;
          // check its layout helper at runtime.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src, tmp_load_klass);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst, tmp_load_klass);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); // higher 32 bits must be zero

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst, tmp_load_klass);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst, tmp_load_klass);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1);
          __ bind(failed);
        }
#endif

        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1);
        }
#endif

        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr(dst, Address(rsp, 0*BytesPerWord));
        __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr(length, Address(rsp, 2*BytesPerWord));
        __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr(src, Address(rsp, 4*BytesPerWord));

        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) {
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) {
        __ cmpl(tmp, src_klass_addr);
      } else {
        __ cmpptr(tmp, src_klass_addr);
      }
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) {
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}
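
// Both the generic and the checkcast arraycopy calls above use the same
// failure protocol: the stub returns 0 on success, or the bitwise complement
// ~K after copying only K elements. A minimal sketch of the decode performed
// by `__ xorl(tmp, -1)` and the adjustments that follow it, assuming that
// convention:
static void sketch_resume_after_partial_copy(int stub_result,
                                             int& src_pos, int& dst_pos, int& length) {
  if (stub_result == 0) return;   // everything was copied
  int copied = stub_result ^ -1;  // ~stub_result == number of elements copied
  src_pos += copied;              // skip the part the stub already handled
  dst_pos += copied;
  length  -= copied;              // the slow path copies only the remainder
}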

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}
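
// A minimal sketch (not VM code) of the complement convention in
// emit_updatecrc32() above: java.util.zip.CRC32 keeps the finalized
// (non-inverted) value between calls, so the register is complemented before
// and after the table-driven byte update. The table argument is a stand-in
// for the shared table referenced through StubRoutines::crc_table_addr().
static juint sketch_update_crc32(juint crc, unsigned char b, const juint table[256]) {
  crc = ~crc;                                 // notl(crc)
  crc = (crc >> 8) ^ table[(crc ^ b) & 0xFF]; // update_byte_crc32 (reflected CRC-32)
  return ~crc;                                // notl(crc)
}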

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (LockingMode == LM_MONITOR) {
    if (op->info() != nullptr) {
      add_debug_info_for_null_check_here(op->info());
      __ null_check(obj);
    }
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    Register tmp = LockingMode == LM_LIGHTWEIGHT ? op->scratch_opr()->as_register() : noreg;
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != nullptr) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

#ifdef _LP64
  if (UseCompressedClassPointers) {
    __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
    __ decode_klass_not_null(result, rscratch1);
  } else
#endif
    __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != nullptr, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != nullptr && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == nullptr) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv, tmp_load_klass);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
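
// A minimal sketch (not VM code) of the row-update policy implemented above
// for receiver-type profiling: bump a matching row, otherwise claim an empty
// row, otherwise fall back to the shared polymorphic counter. The row count
// and field types are illustrative stand-ins for VirtualCallData.
struct SketchCallProfile {
  static const int kRows = 2;   // stand-in for VirtualCallData::row_limit()
  const void* receiver[kRows];  // klass observed at this call site
  long count[kRows];            // per-receiver invocation counter
  long polymorphic_count;       // bumped when no row matches or is free
};

static void sketch_profile_receiver(SketchCallProfile* p, const void* klass) {
  for (int i = 0; i < SketchCallProfile::kRows; i++) {
    if (p->receiver[i] == klass) { p->count[i]++; return; }  // existing row
  }
  for (int i = 0; i < SketchCallProfile::kRows; i++) {
    if (p->receiver[i] == nullptr) {  // claim an empty row
      p->receiver[i] = klass;
      p->count[i]++;
      return;
    }
  }
  p->polymorphic_count++;  // table full: record the polymorphic case
}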

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

#ifdef ASSERT
  if (obj == tmp) {
#ifdef _LP64
    assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
    assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
#endif
  } else {
#ifdef _LP64
    assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
    assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
#endif
  }
#endif
  if (do_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ testptr(mdo_addr, TypeEntries::null_seen);
#ifndef ASSERT
      __ jccb(Assembler::notZero, next); // already set
#else
      __ jcc(Assembler::notZero, next); // already set
#endif
      // atomic update to prevent overwriting Klass* with 0
      __ lock();
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(obj, obj);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != nullptr) {
      Label ok;
      __ load_klass(tmp, obj, tmp_load_klass);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != nullptr) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, obj, tmp_load_klass);
        }
#ifdef _LP64
        __ mov(rscratch1, tmp); // save original value before XOR
#endif
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jccb(Assembler::zero, none);
#ifdef _LP64
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ mov(tmp, rscratch1); // get back original value before XOR
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
#endif
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // Different from before. Cannot keep an accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != nullptr, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ testptr(mdo_addr, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
#ifdef ASSERT
        __ andptr(tmp, TypeEntries::type_klass_mask);
        __ verify_klass_ptr(tmp);
#endif
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ testptr(mdo_addr, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}
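
// A minimal sketch (not VM code) of the profile-word encoding manipulated by
// emit_profile_type() above: one machine word holds the last-seen Klass* with
// flag bits folded into its low, always-zero alignment bits. The constants
// are hypothetical stand-ins for the real TypeEntries values. The XOR idiom
// used above follows directly: (word ^ klass) has no bits set under the
// klass mask iff the recorded klass is the same one.
typedef uintptr_t sketch_profile_word;
static const sketch_profile_word kSketchNullSeen    = 1;  // a null was observed here
static const sketch_profile_word kSketchTypeUnknown = 2;  // conflicting types observed
static const sketch_profile_word kSketchKlassMask   = ~(sketch_profile_word)3;

static bool sketch_same_klass(sketch_profile_word profile, sketch_profile_word klass) {
  return ((profile ^ klass) & kSketchKlassMask) == 0;
}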

void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  bool not_null = op->not_null();
  int flag = op->flag();

  Label not_inline_type;
  if (!not_null) {
    __ testptr(obj, obj);
    __ jccb(Assembler::zero, not_inline_type);
  }

  __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);

  __ orb(mdo_addr, flag);

  __ bind(not_inline_type);
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_float_reg(), tmp->as_xmm_float_reg());
      __ vpxor(dest->as_xmm_float_reg(), tmp->as_xmm_float_reg(), left->as_xmm_float_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
        __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
      }
      __ xorps(dest->as_xmm_float_reg(),
               ExternalAddress((address)float_signflip_pool),
               rscratch1);
    }
  } else if (dest->is_double_xmm()) {
#ifdef _LP64
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      assert(tmp->is_valid(), "need temporary");
      assert_different_registers(left->as_xmm_double_reg(), tmp->as_xmm_double_reg());
      __ vpxor(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), left->as_xmm_double_reg(), 2);
    }
    else
#endif
    {
      assert(!tmp->is_valid(), "do not need temporary");
      if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
        __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
      }
      __ xorpd(dest->as_xmm_double_reg(),
               ExternalAddress((address)double_signflip_pool),
               rscratch1);
    }
#ifndef _LP64
  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  Register reg = dest->as_pointer_register();
  LIR_Address* addr = src->as_address_ptr();
  __ lea(reg, as_Address(addr));

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }
}



void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

#ifndef _LP64
  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
#endif // !_LP64

  } else {
    ShouldNotReachHere();
  }
}
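
// A minimal sketch (not VM code, assumes memcpy from <cstring>) of the XMM
// negation idiom in negate() above: XORing the IEEE-754 sign bit flips a
// float's sign without touching exponent or mantissa, which is what
// xorps/xorpd against the sign-flip constant pools do, lane by lane.
static float sketch_negate_float(float f) {
  unsigned int bits;
  memcpy(&bits, &f, sizeof(bits));  // type-pun via memcpy
  bits ^= 0x80000000u;              // flip only the sign bit
  memcpy(&f, &bits, sizeof(bits));
  return f;                         // == -f, including for NaN and +/-0.0f
}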

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this.
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  __ pause();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}

void LIR_Assembler::check_orig_pc() {
  __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
}

void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
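
// A minimal sketch (not VM code, assumes <atomic>) of why only membar() and
// membar_storeload() emit an instruction above: x86 TSO already preserves
// load-load, load-store, and store-store ordering, so only a store followed
// by a load needs a real fence. A seq_cst fence compiles to a locked
// instruction or mfence on x86.
static void sketch_store_then_load(std::atomic<int>& a, std::atomic<int>& b, int& out) {
  a.store(1, std::memory_order_relaxed);                // the store...
  std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad barrier
  out = b.load(std::memory_order_relaxed);              // ...must not pass this load
}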

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      __ lock();
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      __ lock();
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __
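
// A minimal sketch (not VM code, assumes <atomic>) of the semantics that
// atomic_op() above emits with lock xadd and xchg: fetch_add atomically adds
// and returns the old value; exchange installs a new value and returns the
// old one. On x86, xchg with a memory operand is implicitly locked.
static int sketch_xadd(std::atomic<int>& cell, int increment) {
  return cell.fetch_add(increment);  // lock xadd: the old value comes back in the register
}

static int sketch_xchg(std::atomic<int>& cell, int new_value) {
  return cell.exchange(new_value);   // xchg: swap register and memory atomically
}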