/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_x86.inline.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping. They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128 bits (data) + 128 bits (alignment)

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2],         CONST64(0x7FFFFFFF7FFFFFFF),         CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2],         CONST64(0x7FFFFFFFFFFFFFFF),         CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], (jlong)UCONST64(0x8000000080000000), (jlong)UCONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jlong)UCONST64(0x8000000000000000), (jlong)UCONST64(0x8000000000000000));


NEEDS_CLEANUP // remove these definitions?
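// Typical use of the pools above: andps/andpd with a *_signmask_pool operand
// clears the sign bit (AbsF/AbsD), while xorps/xorpd with a *_signflip_pool
// operand flips it (NegF/NegD).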
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------


address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == nullptr) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject(), rscratch1);
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
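  // Decompose the LIR_Address into an x86 Address of the form
  // base + index * scale + disp. A literal address (no base and no index)
  // may have to be materialized into 'tmp' when it is not reachable with
  // a 32-bit displacement.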
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
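    // For each compiled monitor slot i, the loop below copies the BasicLock
    // word from OSR_buf + monitor_offset - 2*i*BytesPerWord and the locked
    // oop from the following word into this frame's monitor area.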
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is null");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  return __ ic_check(CodeEntryAlignment);
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = rscratch1;
  Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
  assert(thread != noreg, "x86_32 not implemented");

  __ mov_metadata(klass, method->holder()->constant_encoding());
  __ clinit_barrier(klass, thread, &L_skip_barrier /*L_fast_path*/);

  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = nullptr;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == nullptr) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
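// The handler fetches the pending exception from the thread, clears the
// thread's exception fields, unlocks the receiver for synchronized methods,
// issues the dtrace method-exit probe if enabled, removes the activation and
// jumps to the unwind_exception stub.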
394 int LIR_Assembler::emit_unwind_handler() { 395 #ifndef PRODUCT 396 if (CommentedAssembly) { 397 _masm->block_comment("Unwind handler"); 398 } 399 #endif 400 401 int offset = code_offset(); 402 403 // Fetch the exception from TLS and clear out exception related thread state 404 Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 405 NOT_LP64(__ get_thread(thread)); 406 __ movptr(rax, Address(thread, JavaThread::exception_oop_offset())); 407 __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD); 408 __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD); 409 410 __ bind(_unwind_handler_entry); 411 __ verify_not_null_oop(rax); 412 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 413 __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved) 414 } 415 416 // Perform needed unlocking 417 MonitorExitStub* stub = nullptr; 418 if (method()->is_synchronized()) { 419 monitor_address(0, FrameMap::rax_opr); 420 stub = new MonitorExitStub(FrameMap::rax_opr, true, 0); 421 if (LockingMode == LM_MONITOR) { 422 __ jmp(*stub->entry()); 423 } else { 424 __ unlock_object(rdi, rsi, rax, *stub->entry()); 425 } 426 __ bind(*stub->continuation()); 427 } 428 429 if (compilation()->env()->dtrace_method_probes()) { 430 #ifdef _LP64 431 __ mov(rdi, r15_thread); 432 __ mov_metadata(rsi, method()->constant_encoding()); 433 #else 434 __ get_thread(rax); 435 __ movptr(Address(rsp, 0), rax); 436 __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg); 437 #endif 438 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 439 } 440 441 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 442 __ mov(rax, rbx); // Restore the exception 443 } 444 445 // remove the activation and dispatch to the unwind handler 446 __ remove_frame(initial_frame_size_in_bytes()); 447 __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id))); 448 449 // Emit the slow path assembly 450 if (stub != nullptr) { 451 stub->emit_code(this); 452 } 453 454 return offset; 455 } 456 457 458 int LIR_Assembler::emit_deopt_handler() { 459 // generate code for exception handler 460 address handler_base = __ start_a_stub(deopt_handler_size()); 461 if (handler_base == nullptr) { 462 // not enough space left for the handler 463 bailout("deopt handler overflow"); 464 return -1; 465 } 466 467 int offset = code_offset(); 468 InternalAddress here(__ pc()); 469 470 __ pushptr(here.addr(), rscratch1); 471 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); 472 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 473 __ end_a_stub(); 474 475 return offset; 476 } 477 478 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) { 479 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,"); 480 if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) { 481 assert(result->fpu() == 0, "result must already be on TOS"); 482 } 483 484 // Pop the stack before the safepoint code 485 __ remove_frame(initial_frame_size_in_bytes()); 486 487 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 488 __ reserved_stack_check(); 489 } 490 491 // Note: we do not need to round double result; float result has the right precision 492 // the poll sets the condition code, but no data registers 493 494 #ifdef _LP64 495 const Register thread = 
r15_thread; 496 #else 497 const Register thread = rbx; 498 __ get_thread(thread); 499 #endif 500 code_stub->set_safepoint_offset(__ offset()); 501 __ relocate(relocInfo::poll_return_type); 502 __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */); 503 __ ret(0); 504 } 505 506 507 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 508 guarantee(info != nullptr, "Shouldn't be null"); 509 int offset = __ offset(); 510 #ifdef _LP64 511 const Register poll_addr = rscratch1; 512 __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset())); 513 #else 514 assert(tmp->is_cpu_register(), "needed"); 515 const Register poll_addr = tmp->as_register(); 516 __ get_thread(poll_addr); 517 __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset()))); 518 #endif 519 add_debug_info_for_branch(info); 520 __ relocate(relocInfo::poll_type); 521 address pre_pc = __ pc(); 522 __ testl(rax, Address(poll_addr, 0)); 523 address post_pc = __ pc(); 524 guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length"); 525 return offset; 526 } 527 528 529 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 530 if (from_reg != to_reg) __ mov(to_reg, from_reg); 531 } 532 533 void LIR_Assembler::swap_reg(Register a, Register b) { 534 __ xchgptr(a, b); 535 } 536 537 538 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 539 assert(src->is_constant(), "should not call otherwise"); 540 assert(dest->is_register(), "should not call otherwise"); 541 LIR_Const* c = src->as_constant_ptr(); 542 543 switch (c->type()) { 544 case T_INT: { 545 assert(patch_code == lir_patch_none, "no patching handled here"); 546 __ movl(dest->as_register(), c->as_jint()); 547 break; 548 } 549 550 case T_ADDRESS: { 551 assert(patch_code == lir_patch_none, "no patching handled here"); 552 __ movptr(dest->as_register(), c->as_jint()); 553 break; 554 } 555 556 case T_LONG: { 557 assert(patch_code == lir_patch_none, "no patching handled here"); 558 #ifdef _LP64 559 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong()); 560 #else 561 __ movptr(dest->as_register_lo(), c->as_jint_lo()); 562 __ movptr(dest->as_register_hi(), c->as_jint_hi()); 563 #endif // _LP64 564 break; 565 } 566 567 case T_OBJECT: { 568 if (patch_code != lir_patch_none) { 569 jobject2reg_with_patching(dest->as_register(), info); 570 } else { 571 __ movoop(dest->as_register(), c->as_jobject()); 572 } 573 break; 574 } 575 576 case T_METADATA: { 577 if (patch_code != lir_patch_none) { 578 klass2reg_with_patching(dest->as_register(), info); 579 } else { 580 __ mov_metadata(dest->as_register(), c->as_metadata()); 581 } 582 break; 583 } 584 585 case T_FLOAT: { 586 if (dest->is_single_xmm()) { 587 if (UseAVX <= 2 && c->is_zero_float()) { 588 __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg()); 589 } else { 590 __ movflt(dest->as_xmm_float_reg(), 591 InternalAddress(float_constant(c->as_jfloat()))); 592 } 593 } else { 594 ShouldNotReachHere(); 595 } 596 break; 597 } 598 599 case T_DOUBLE: { 600 if (dest->is_double_xmm()) { 601 if (UseAVX <= 2 && c->is_zero_double()) { 602 __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg()); 603 } else { 604 __ movdbl(dest->as_xmm_double_reg(), 605 InternalAddress(double_constant(c->as_jdouble()))); 606 } 607 } else { 608 ShouldNotReachHere(); 609 } 610 break; 611 } 612 613 default: 614 ShouldNotReachHere(); 615 } 616 } 617 618 void 
LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 619 assert(src->is_constant(), "should not call otherwise"); 620 assert(dest->is_stack(), "should not call otherwise"); 621 LIR_Const* c = src->as_constant_ptr(); 622 623 switch (c->type()) { 624 case T_INT: // fall through 625 case T_FLOAT: 626 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 627 break; 628 629 case T_ADDRESS: 630 __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 631 break; 632 633 case T_OBJECT: 634 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1); 635 break; 636 637 case T_LONG: // fall through 638 case T_DOUBLE: 639 #ifdef _LP64 640 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 641 lo_word_offset_in_bytes), 642 (intptr_t)c->as_jlong_bits(), 643 rscratch1); 644 #else 645 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 646 lo_word_offset_in_bytes), c->as_jint_lo_bits()); 647 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(), 648 hi_word_offset_in_bytes), c->as_jint_hi_bits()); 649 #endif // _LP64 650 break; 651 652 default: 653 ShouldNotReachHere(); 654 } 655 } 656 657 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 658 assert(src->is_constant(), "should not call otherwise"); 659 assert(dest->is_address(), "should not call otherwise"); 660 LIR_Const* c = src->as_constant_ptr(); 661 LIR_Address* addr = dest->as_address_ptr(); 662 663 int null_check_here = code_offset(); 664 switch (type) { 665 case T_INT: // fall through 666 case T_FLOAT: 667 __ movl(as_Address(addr), c->as_jint_bits()); 668 break; 669 670 case T_ADDRESS: 671 __ movptr(as_Address(addr), c->as_jint_bits()); 672 break; 673 674 case T_OBJECT: // fall through 675 case T_ARRAY: 676 if (c->as_jobject() == nullptr) { 677 if (UseCompressedOops && !wide) { 678 __ movl(as_Address(addr), NULL_WORD); 679 } else { 680 #ifdef _LP64 681 __ xorptr(rscratch1, rscratch1); 682 null_check_here = code_offset(); 683 __ movptr(as_Address(addr), rscratch1); 684 #else 685 __ movptr(as_Address(addr), NULL_WORD); 686 #endif 687 } 688 } else { 689 if (is_literal_address(addr)) { 690 ShouldNotReachHere(); 691 __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1); 692 } else { 693 #ifdef _LP64 694 __ movoop(rscratch1, c->as_jobject()); 695 if (UseCompressedOops && !wide) { 696 __ encode_heap_oop(rscratch1); 697 null_check_here = code_offset(); 698 __ movl(as_Address_lo(addr), rscratch1); 699 } else { 700 null_check_here = code_offset(); 701 __ movptr(as_Address_lo(addr), rscratch1); 702 } 703 #else 704 __ movoop(as_Address(addr), c->as_jobject(), noreg); 705 #endif 706 } 707 } 708 break; 709 710 case T_LONG: // fall through 711 case T_DOUBLE: 712 #ifdef _LP64 713 if (is_literal_address(addr)) { 714 ShouldNotReachHere(); 715 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits()); 716 } else { 717 __ movptr(r10, (intptr_t)c->as_jlong_bits()); 718 null_check_here = code_offset(); 719 __ movptr(as_Address_lo(addr), r10); 720 } 721 #else 722 // Always reachable in 32bit so this doesn't produce useless move literal 723 __ movptr(as_Address_hi(addr), c->as_jint_hi_bits()); 724 __ movptr(as_Address_lo(addr), c->as_jint_lo_bits()); 725 #endif // _LP64 726 break; 727 728 case T_BOOLEAN: // fall through 729 case T_BYTE: 730 __ movb(as_Address(addr), c->as_jint() & 0xFF); 731 break; 732 733 case T_CHAR: // fall through 734 case T_SHORT: 735 __ 
movw(as_Address(addr), c->as_jint() & 0xFFFF); 736 break; 737 738 default: 739 ShouldNotReachHere(); 740 }; 741 742 if (info != nullptr) { 743 add_debug_info_for_null_check(null_check_here, info); 744 } 745 } 746 747 748 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) { 749 assert(src->is_register(), "should not call otherwise"); 750 assert(dest->is_register(), "should not call otherwise"); 751 752 // move between cpu-registers 753 if (dest->is_single_cpu()) { 754 #ifdef _LP64 755 if (src->type() == T_LONG) { 756 // Can do LONG -> OBJECT 757 move_regs(src->as_register_lo(), dest->as_register()); 758 return; 759 } 760 #endif 761 assert(src->is_single_cpu(), "must match"); 762 if (src->type() == T_OBJECT) { 763 __ verify_oop(src->as_register()); 764 } 765 move_regs(src->as_register(), dest->as_register()); 766 767 } else if (dest->is_double_cpu()) { 768 #ifdef _LP64 769 if (is_reference_type(src->type())) { 770 // Surprising to me but we can see move of a long to t_object 771 __ verify_oop(src->as_register()); 772 move_regs(src->as_register(), dest->as_register_lo()); 773 return; 774 } 775 #endif 776 assert(src->is_double_cpu(), "must match"); 777 Register f_lo = src->as_register_lo(); 778 Register f_hi = src->as_register_hi(); 779 Register t_lo = dest->as_register_lo(); 780 Register t_hi = dest->as_register_hi(); 781 #ifdef _LP64 782 assert(f_hi == f_lo, "must be same"); 783 assert(t_hi == t_lo, "must be same"); 784 move_regs(f_lo, t_lo); 785 #else 786 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation"); 787 788 789 if (f_lo == t_hi && f_hi == t_lo) { 790 swap_reg(f_lo, f_hi); 791 } else if (f_hi == t_lo) { 792 assert(f_lo != t_hi, "overwriting register"); 793 move_regs(f_hi, t_hi); 794 move_regs(f_lo, t_lo); 795 } else { 796 assert(f_hi != t_lo, "overwriting register"); 797 move_regs(f_lo, t_lo); 798 move_regs(f_hi, t_hi); 799 } 800 #endif // LP64 801 802 // move between xmm-registers 803 } else if (dest->is_single_xmm()) { 804 assert(src->is_single_xmm(), "must match"); 805 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg()); 806 } else if (dest->is_double_xmm()) { 807 assert(src->is_double_xmm(), "must match"); 808 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg()); 809 810 } else { 811 ShouldNotReachHere(); 812 } 813 } 814 815 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 816 assert(src->is_register(), "should not call otherwise"); 817 assert(dest->is_stack(), "should not call otherwise"); 818 819 if (src->is_single_cpu()) { 820 Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); 821 if (is_reference_type(type)) { 822 __ verify_oop(src->as_register()); 823 __ movptr (dst, src->as_register()); 824 } else if (type == T_METADATA || type == T_ADDRESS) { 825 __ movptr (dst, src->as_register()); 826 } else { 827 __ movl (dst, src->as_register()); 828 } 829 830 } else if (src->is_double_cpu()) { 831 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes); 832 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes); 833 __ movptr (dstLO, src->as_register_lo()); 834 NOT_LP64(__ movptr (dstHI, src->as_register_hi())); 835 836 } else if (src->is_single_xmm()) { 837 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 838 __ movflt(dst_addr, src->as_xmm_float_reg()); 839 840 } else if (src->is_double_xmm()) { 841 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 842 __ 
movdbl(dst_addr, src->as_xmm_double_reg()); 843 844 } else { 845 ShouldNotReachHere(); 846 } 847 } 848 849 850 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) { 851 LIR_Address* to_addr = dest->as_address_ptr(); 852 PatchingStub* patch = nullptr; 853 Register compressed_src = rscratch1; 854 855 if (is_reference_type(type)) { 856 __ verify_oop(src->as_register()); 857 #ifdef _LP64 858 if (UseCompressedOops && !wide) { 859 __ movptr(compressed_src, src->as_register()); 860 __ encode_heap_oop(compressed_src); 861 if (patch_code != lir_patch_none) { 862 info->oop_map()->set_narrowoop(compressed_src->as_VMReg()); 863 } 864 } 865 #endif 866 } 867 868 if (patch_code != lir_patch_none) { 869 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 870 Address toa = as_Address(to_addr); 871 assert(toa.disp() != 0, "must have"); 872 } 873 874 int null_check_here = code_offset(); 875 switch (type) { 876 case T_FLOAT: { 877 assert(src->is_single_xmm(), "not a float"); 878 __ movflt(as_Address(to_addr), src->as_xmm_float_reg()); 879 break; 880 } 881 882 case T_DOUBLE: { 883 assert(src->is_double_xmm(), "not a double"); 884 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg()); 885 break; 886 } 887 888 case T_ARRAY: // fall through 889 case T_OBJECT: // fall through 890 if (UseCompressedOops && !wide) { 891 __ movl(as_Address(to_addr), compressed_src); 892 } else { 893 __ movptr(as_Address(to_addr), src->as_register()); 894 } 895 break; 896 case T_METADATA: 897 // We get here to store a method pointer to the stack to pass to 898 // a dtrace runtime call. This can't work on 64 bit with 899 // compressed klass ptrs: T_METADATA can be a compressed klass 900 // ptr or a 64 bit method pointer. 
901 LP64_ONLY(ShouldNotReachHere()); 902 __ movptr(as_Address(to_addr), src->as_register()); 903 break; 904 case T_ADDRESS: 905 __ movptr(as_Address(to_addr), src->as_register()); 906 break; 907 case T_INT: 908 __ movl(as_Address(to_addr), src->as_register()); 909 break; 910 911 case T_LONG: { 912 Register from_lo = src->as_register_lo(); 913 Register from_hi = src->as_register_hi(); 914 #ifdef _LP64 915 __ movptr(as_Address_lo(to_addr), from_lo); 916 #else 917 Register base = to_addr->base()->as_register(); 918 Register index = noreg; 919 if (to_addr->index()->is_register()) { 920 index = to_addr->index()->as_register(); 921 } 922 if (base == from_lo || index == from_lo) { 923 assert(base != from_hi, "can't be"); 924 assert(index == noreg || (index != base && index != from_hi), "can't handle this"); 925 __ movl(as_Address_hi(to_addr), from_hi); 926 if (patch != nullptr) { 927 patching_epilog(patch, lir_patch_high, base, info); 928 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 929 patch_code = lir_patch_low; 930 } 931 __ movl(as_Address_lo(to_addr), from_lo); 932 } else { 933 assert(index == noreg || (index != base && index != from_lo), "can't handle this"); 934 __ movl(as_Address_lo(to_addr), from_lo); 935 if (patch != nullptr) { 936 patching_epilog(patch, lir_patch_low, base, info); 937 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 938 patch_code = lir_patch_high; 939 } 940 __ movl(as_Address_hi(to_addr), from_hi); 941 } 942 #endif // _LP64 943 break; 944 } 945 946 case T_BYTE: // fall through 947 case T_BOOLEAN: { 948 Register src_reg = src->as_register(); 949 Address dst_addr = as_Address(to_addr); 950 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6"); 951 __ movb(dst_addr, src_reg); 952 break; 953 } 954 955 case T_CHAR: // fall through 956 case T_SHORT: 957 __ movw(as_Address(to_addr), src->as_register()); 958 break; 959 960 default: 961 ShouldNotReachHere(); 962 } 963 if (info != nullptr) { 964 add_debug_info_for_null_check(null_check_here, info); 965 } 966 967 if (patch_code != lir_patch_none) { 968 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info); 969 } 970 } 971 972 973 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 974 assert(src->is_stack(), "should not call otherwise"); 975 assert(dest->is_register(), "should not call otherwise"); 976 977 if (dest->is_single_cpu()) { 978 if (is_reference_type(type)) { 979 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 980 __ verify_oop(dest->as_register()); 981 } else if (type == T_METADATA || type == T_ADDRESS) { 982 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 983 } else { 984 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix())); 985 } 986 987 } else if (dest->is_double_cpu()) { 988 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes); 989 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes); 990 __ movptr(dest->as_register_lo(), src_addr_LO); 991 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI)); 992 993 } else if (dest->is_single_xmm()) { 994 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 995 __ movflt(dest->as_xmm_float_reg(), src_addr); 996 997 } else if (dest->is_double_xmm()) { 998 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 999 __ 
movdbl(dest->as_xmm_double_reg(), src_addr); 1000 1001 } else { 1002 ShouldNotReachHere(); 1003 } 1004 } 1005 1006 1007 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 1008 if (src->is_single_stack()) { 1009 if (is_reference_type(type)) { 1010 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix())); 1011 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix())); 1012 } else { 1013 #ifndef _LP64 1014 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix())); 1015 __ popl (frame_map()->address_for_slot(dest->single_stack_ix())); 1016 #else 1017 //no pushl on 64bits 1018 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix())); 1019 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1); 1020 #endif 1021 } 1022 1023 } else if (src->is_double_stack()) { 1024 #ifdef _LP64 1025 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix())); 1026 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix())); 1027 #else 1028 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0)); 1029 // push and pop the part at src + wordSize, adding wordSize for the previous push 1030 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize)); 1031 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize)); 1032 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0)); 1033 #endif // _LP64 1034 1035 } else { 1036 ShouldNotReachHere(); 1037 } 1038 } 1039 1040 1041 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) { 1042 assert(src->is_address(), "should not call otherwise"); 1043 assert(dest->is_register(), "should not call otherwise"); 1044 1045 LIR_Address* addr = src->as_address_ptr(); 1046 Address from_addr = as_Address(addr); 1047 1048 if (addr->base()->type() == T_OBJECT) { 1049 __ verify_oop(addr->base()->as_pointer_register()); 1050 } 1051 1052 switch (type) { 1053 case T_BOOLEAN: // fall through 1054 case T_BYTE: // fall through 1055 case T_CHAR: // fall through 1056 case T_SHORT: 1057 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) { 1058 // on pre P6 processors we may get partial register stalls 1059 // so blow away the value of to_rinfo before loading a 1060 // partial word into it. Do it here so that it precedes 1061 // the potential patch point below. 
1062 __ xorptr(dest->as_register(), dest->as_register()); 1063 } 1064 break; 1065 default: 1066 break; 1067 } 1068 1069 PatchingStub* patch = nullptr; 1070 if (patch_code != lir_patch_none) { 1071 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1072 assert(from_addr.disp() != 0, "must have"); 1073 } 1074 if (info != nullptr) { 1075 add_debug_info_for_null_check_here(info); 1076 } 1077 1078 switch (type) { 1079 case T_FLOAT: { 1080 if (dest->is_single_xmm()) { 1081 __ movflt(dest->as_xmm_float_reg(), from_addr); 1082 } else { 1083 ShouldNotReachHere(); 1084 } 1085 break; 1086 } 1087 1088 case T_DOUBLE: { 1089 if (dest->is_double_xmm()) { 1090 __ movdbl(dest->as_xmm_double_reg(), from_addr); 1091 } else { 1092 ShouldNotReachHere(); 1093 } 1094 break; 1095 } 1096 1097 case T_OBJECT: // fall through 1098 case T_ARRAY: // fall through 1099 if (UseCompressedOops && !wide) { 1100 __ movl(dest->as_register(), from_addr); 1101 } else { 1102 __ movptr(dest->as_register(), from_addr); 1103 } 1104 break; 1105 1106 case T_ADDRESS: 1107 __ movptr(dest->as_register(), from_addr); 1108 break; 1109 case T_INT: 1110 __ movl(dest->as_register(), from_addr); 1111 break; 1112 1113 case T_LONG: { 1114 Register to_lo = dest->as_register_lo(); 1115 Register to_hi = dest->as_register_hi(); 1116 #ifdef _LP64 1117 __ movptr(to_lo, as_Address_lo(addr)); 1118 #else 1119 Register base = addr->base()->as_register(); 1120 Register index = noreg; 1121 if (addr->index()->is_register()) { 1122 index = addr->index()->as_register(); 1123 } 1124 if ((base == to_lo && index == to_hi) || 1125 (base == to_hi && index == to_lo)) { 1126 // addresses with 2 registers are only formed as a result of 1127 // array access so this code will never have to deal with 1128 // patches or null checks. 
1129 assert(info == nullptr && patch == nullptr, "must be"); 1130 __ lea(to_hi, as_Address(addr)); 1131 __ movl(to_lo, Address(to_hi, 0)); 1132 __ movl(to_hi, Address(to_hi, BytesPerWord)); 1133 } else if (base == to_lo || index == to_lo) { 1134 assert(base != to_hi, "can't be"); 1135 assert(index == noreg || (index != base && index != to_hi), "can't handle this"); 1136 __ movl(to_hi, as_Address_hi(addr)); 1137 if (patch != nullptr) { 1138 patching_epilog(patch, lir_patch_high, base, info); 1139 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1140 patch_code = lir_patch_low; 1141 } 1142 __ movl(to_lo, as_Address_lo(addr)); 1143 } else { 1144 assert(index == noreg || (index != base && index != to_lo), "can't handle this"); 1145 __ movl(to_lo, as_Address_lo(addr)); 1146 if (patch != nullptr) { 1147 patching_epilog(patch, lir_patch_low, base, info); 1148 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1149 patch_code = lir_patch_high; 1150 } 1151 __ movl(to_hi, as_Address_hi(addr)); 1152 } 1153 #endif // _LP64 1154 break; 1155 } 1156 1157 case T_BOOLEAN: // fall through 1158 case T_BYTE: { 1159 Register dest_reg = dest->as_register(); 1160 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1161 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1162 __ movsbl(dest_reg, from_addr); 1163 } else { 1164 __ movb(dest_reg, from_addr); 1165 __ shll(dest_reg, 24); 1166 __ sarl(dest_reg, 24); 1167 } 1168 break; 1169 } 1170 1171 case T_CHAR: { 1172 Register dest_reg = dest->as_register(); 1173 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1174 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1175 __ movzwl(dest_reg, from_addr); 1176 } else { 1177 __ movw(dest_reg, from_addr); 1178 } 1179 break; 1180 } 1181 1182 case T_SHORT: { 1183 Register dest_reg = dest->as_register(); 1184 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1185 __ movswl(dest_reg, from_addr); 1186 } else { 1187 __ movw(dest_reg, from_addr); 1188 __ shll(dest_reg, 16); 1189 __ sarl(dest_reg, 16); 1190 } 1191 break; 1192 } 1193 1194 default: 1195 ShouldNotReachHere(); 1196 } 1197 1198 if (patch != nullptr) { 1199 patching_epilog(patch, patch_code, addr->base()->as_register(), info); 1200 } 1201 1202 if (is_reference_type(type)) { 1203 #ifdef _LP64 1204 if (UseCompressedOops && !wide) { 1205 __ decode_heap_oop(dest->as_register()); 1206 } 1207 #endif 1208 1209 __ verify_oop(dest->as_register()); 1210 } 1211 } 1212 1213 1214 NEEDS_CLEANUP; // This could be static? 
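// Maps an array element size to the SIB scale factor used when forming
// element addresses of the form base + index * scale + base_offset,
// e.g. T_BYTE -> times_1, T_INT -> times_4, T_LONG/T_DOUBLE -> times_8.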
1215 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { 1216 int elem_size = type2aelembytes(type); 1217 switch (elem_size) { 1218 case 1: return Address::times_1; 1219 case 2: return Address::times_2; 1220 case 4: return Address::times_4; 1221 case 8: return Address::times_8; 1222 } 1223 ShouldNotReachHere(); 1224 return Address::no_scale; 1225 } 1226 1227 1228 void LIR_Assembler::emit_op3(LIR_Op3* op) { 1229 switch (op->code()) { 1230 case lir_idiv: 1231 case lir_irem: 1232 arithmetic_idiv(op->code(), 1233 op->in_opr1(), 1234 op->in_opr2(), 1235 op->in_opr3(), 1236 op->result_opr(), 1237 op->info()); 1238 break; 1239 case lir_fmad: 1240 __ fmad(op->result_opr()->as_xmm_double_reg(), 1241 op->in_opr1()->as_xmm_double_reg(), 1242 op->in_opr2()->as_xmm_double_reg(), 1243 op->in_opr3()->as_xmm_double_reg()); 1244 break; 1245 case lir_fmaf: 1246 __ fmaf(op->result_opr()->as_xmm_float_reg(), 1247 op->in_opr1()->as_xmm_float_reg(), 1248 op->in_opr2()->as_xmm_float_reg(), 1249 op->in_opr3()->as_xmm_float_reg()); 1250 break; 1251 default: ShouldNotReachHere(); break; 1252 } 1253 } 1254 1255 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 1256 #ifdef ASSERT 1257 assert(op->block() == nullptr || op->block()->label() == op->label(), "wrong label"); 1258 if (op->block() != nullptr) _branch_target_blocks.append(op->block()); 1259 if (op->ublock() != nullptr) _branch_target_blocks.append(op->ublock()); 1260 #endif 1261 1262 if (op->cond() == lir_cond_always) { 1263 if (op->info() != nullptr) add_debug_info_for_branch(op->info()); 1264 __ jmp (*(op->label())); 1265 } else { 1266 Assembler::Condition acond = Assembler::zero; 1267 if (op->code() == lir_cond_float_branch) { 1268 assert(op->ublock() != nullptr, "must have unordered successor"); 1269 __ jcc(Assembler::parity, *(op->ublock()->label())); 1270 switch(op->cond()) { 1271 case lir_cond_equal: acond = Assembler::equal; break; 1272 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1273 case lir_cond_less: acond = Assembler::below; break; 1274 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; 1275 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; 1276 case lir_cond_greater: acond = Assembler::above; break; 1277 default: ShouldNotReachHere(); 1278 } 1279 } else { 1280 switch (op->cond()) { 1281 case lir_cond_equal: acond = Assembler::equal; break; 1282 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1283 case lir_cond_less: acond = Assembler::less; break; 1284 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 1285 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 1286 case lir_cond_greater: acond = Assembler::greater; break; 1287 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 1288 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 1289 default: ShouldNotReachHere(); 1290 } 1291 } 1292 __ jcc(acond,*(op->label())); 1293 } 1294 } 1295 1296 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 1297 LIR_Opr src = op->in_opr(); 1298 LIR_Opr dest = op->result_opr(); 1299 1300 switch (op->bytecode()) { 1301 case Bytecodes::_i2l: 1302 #ifdef _LP64 1303 __ movl2ptr(dest->as_register_lo(), src->as_register()); 1304 #else 1305 move_regs(src->as_register(), dest->as_register_lo()); 1306 move_regs(src->as_register(), dest->as_register_hi()); 1307 __ sarl(dest->as_register_hi(), 31); 1308 #endif // LP64 1309 break; 1310 1311 case Bytecodes::_l2i: 1312 #ifdef _LP64 1313 __ movl(dest->as_register(), src->as_register_lo()); 
1314 #else 1315 move_regs(src->as_register_lo(), dest->as_register()); 1316 #endif 1317 break; 1318 1319 case Bytecodes::_i2b: 1320 move_regs(src->as_register(), dest->as_register()); 1321 __ sign_extend_byte(dest->as_register()); 1322 break; 1323 1324 case Bytecodes::_i2c: 1325 move_regs(src->as_register(), dest->as_register()); 1326 __ andl(dest->as_register(), 0xFFFF); 1327 break; 1328 1329 case Bytecodes::_i2s: 1330 move_regs(src->as_register(), dest->as_register()); 1331 __ sign_extend_short(dest->as_register()); 1332 break; 1333 1334 case Bytecodes::_f2d: 1335 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); 1336 break; 1337 1338 case Bytecodes::_d2f: 1339 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); 1340 break; 1341 1342 case Bytecodes::_i2f: 1343 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); 1344 break; 1345 1346 case Bytecodes::_i2d: 1347 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); 1348 break; 1349 1350 case Bytecodes::_l2f: 1351 __ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo()); 1352 break; 1353 1354 case Bytecodes::_l2d: 1355 __ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo()); 1356 break; 1357 1358 case Bytecodes::_f2i: 1359 __ convert_f2i(dest->as_register(), src->as_xmm_float_reg()); 1360 break; 1361 1362 case Bytecodes::_d2i: 1363 __ convert_d2i(dest->as_register(), src->as_xmm_double_reg()); 1364 break; 1365 1366 case Bytecodes::_f2l: 1367 __ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg()); 1368 break; 1369 1370 case Bytecodes::_d2l: 1371 __ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg()); 1372 break; 1373 1374 default: ShouldNotReachHere(); 1375 } 1376 } 1377 1378 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 1379 if (op->init_check()) { 1380 add_debug_info_for_null_check_here(op->stub()->info()); 1381 // init_state needs acquire, but x86 is TSO, and so we are already good. 
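    // Compare the klass' init_state byte against fully_initialized and take
    // the stub's slow path if the holder is not yet fully initialized.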
1382 __ cmpb(Address(op->klass()->as_register(), 1383 InstanceKlass::init_state_offset()), 1384 InstanceKlass::fully_initialized); 1385 __ jcc(Assembler::notEqual, *op->stub()->entry()); 1386 } 1387 __ allocate_object(op->obj()->as_register(), 1388 op->tmp1()->as_register(), 1389 op->tmp2()->as_register(), 1390 op->header_size(), 1391 op->object_size(), 1392 op->klass()->as_register(), 1393 *op->stub()->entry()); 1394 __ bind(*op->stub()->continuation()); 1395 } 1396 1397 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 1398 Register len = op->len()->as_register(); 1399 LP64_ONLY( __ movslq(len, len); ) 1400 1401 if (UseSlowPath || 1402 (!UseFastNewObjectArray && is_reference_type(op->type())) || 1403 (!UseFastNewTypeArray && !is_reference_type(op->type()))) { 1404 __ jmp(*op->stub()->entry()); 1405 } else { 1406 Register tmp1 = op->tmp1()->as_register(); 1407 Register tmp2 = op->tmp2()->as_register(); 1408 Register tmp3 = op->tmp3()->as_register(); 1409 if (len == tmp1) { 1410 tmp1 = tmp3; 1411 } else if (len == tmp2) { 1412 tmp2 = tmp3; 1413 } else if (len == tmp3) { 1414 // everything is ok 1415 } else { 1416 __ mov(tmp3, len); 1417 } 1418 __ allocate_array(op->obj()->as_register(), 1419 len, 1420 tmp1, 1421 tmp2, 1422 arrayOopDesc::base_offset_in_bytes(op->type()), 1423 array_element_size(op->type()), 1424 op->klass()->as_register(), 1425 *op->stub()->entry(), 1426 op->zero_array()); 1427 } 1428 __ bind(*op->stub()->continuation()); 1429 } 1430 1431 void LIR_Assembler::type_profile_helper(Register mdo, 1432 ciMethodData *md, ciProfileData *data, 1433 Register recv, Label* update_done) { 1434 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1435 Label next_test; 1436 // See if the receiver is receiver[n]. 1437 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); 1438 __ jccb(Assembler::notEqual, next_test); 1439 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 1440 __ addptr(data_addr, DataLayout::counter_increment); 1441 __ jmp(*update_done); 1442 __ bind(next_test); 1443 } 1444 1445 // Didn't find receiver; find next empty slot and fill it in 1446 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1447 Label next_test; 1448 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 1449 __ cmpptr(recv_addr, NULL_WORD); 1450 __ jccb(Assembler::notEqual, next_test); 1451 __ movptr(recv_addr, recv); 1452 __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment); 1453 __ jmp(*update_done); 1454 __ bind(next_test); 1455 } 1456 } 1457 1458 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 1459 // we always need a stub for the failure case. 
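  // Register roles: obj holds the object under test, k_RInfo receives the
  // target klass, klass_RInfo the object's klass; when profiling, klass_RInfo
  // doubles as the MDO pointer and the receiver klass is loaded into k_RInfo.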
1460 CodeStub* stub = op->stub(); 1461 Register obj = op->object()->as_register(); 1462 Register k_RInfo = op->tmp1()->as_register(); 1463 Register klass_RInfo = op->tmp2()->as_register(); 1464 Register dst = op->result_opr()->as_register(); 1465 ciKlass* k = op->klass(); 1466 Register Rtmp1 = noreg; 1467 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1468 1469 // check if it needs to be profiled 1470 ciMethodData* md = nullptr; 1471 ciProfileData* data = nullptr; 1472 1473 if (op->should_profile()) { 1474 ciMethod* method = op->profiled_method(); 1475 assert(method != nullptr, "Should have method"); 1476 int bci = op->profiled_bci(); 1477 md = method->method_data_or_null(); 1478 assert(md != nullptr, "Sanity"); 1479 data = md->bci_to_data(bci); 1480 assert(data != nullptr, "need data for type check"); 1481 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1482 } 1483 Label* success_target = success; 1484 Label* failure_target = failure; 1485 1486 if (obj == k_RInfo) { 1487 k_RInfo = dst; 1488 } else if (obj == klass_RInfo) { 1489 klass_RInfo = dst; 1490 } 1491 if (k->is_loaded() && !UseCompressedClassPointers) { 1492 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1493 } else { 1494 Rtmp1 = op->tmp3()->as_register(); 1495 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1496 } 1497 1498 assert_different_registers(obj, k_RInfo, klass_RInfo); 1499 1500 __ testptr(obj, obj); 1501 if (op->should_profile()) { 1502 Label not_null; 1503 Register mdo = klass_RInfo; 1504 __ mov_metadata(mdo, md->constant_encoding()); 1505 __ jccb(Assembler::notEqual, not_null); 1506 // Object is null; update MDO and exit 1507 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1508 int header_bits = BitData::null_seen_byte_constant(); 1509 __ orb(data_addr, header_bits); 1510 __ jmp(*obj_is_null); 1511 __ bind(not_null); 1512 1513 Label update_done; 1514 Register recv = k_RInfo; 1515 __ load_klass(recv, obj, tmp_load_klass); 1516 type_profile_helper(mdo, md, data, recv, &update_done); 1517 1518 Address nonprofiled_receiver_count_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1519 __ addptr(nonprofiled_receiver_count_addr, DataLayout::counter_increment); 1520 1521 __ bind(update_done); 1522 } else { 1523 __ jcc(Assembler::equal, *obj_is_null); 1524 } 1525 1526 if (!k->is_loaded()) { 1527 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1528 } else { 1529 #ifdef _LP64 1530 __ mov_metadata(k_RInfo, k->constant_encoding()); 1531 #endif // _LP64 1532 } 1533 __ verify_oop(obj); 1534 1535 if (op->fast_check()) { 1536 // get object class 1537 // not a safepoint as obj null check happens earlier 1538 #ifdef _LP64 1539 if (UseCompressedClassPointers) { 1540 __ load_klass(Rtmp1, obj, tmp_load_klass); 1541 __ cmpptr(k_RInfo, Rtmp1); 1542 } else { 1543 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1544 } 1545 #else 1546 if (k->is_loaded()) { 1547 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1548 } else { 1549 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1550 } 1551 #endif 1552 __ jcc(Assembler::notEqual, *failure_target); 1553 // successful cast, fall through to profile or jump 1554 } else { 1555 // get object class 1556 // not a safepoint as obj null check happens earlier 1557 __ load_klass(klass_RInfo, obj, tmp_load_klass); 1558 if (k->is_loaded()) { 1559 // See if we get an immediate positive hit 1560 
#ifdef _LP64 1561 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1562 #else 1563 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1564 #endif // _LP64 1565 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1566 __ jcc(Assembler::notEqual, *failure_target); 1567 // successful cast, fall through to profile or jump 1568 } else { 1569 // See if we get an immediate positive hit 1570 __ jcc(Assembler::equal, *success_target); 1571 // check for self 1572 #ifdef _LP64 1573 __ cmpptr(klass_RInfo, k_RInfo); 1574 #else 1575 __ cmpklass(klass_RInfo, k->constant_encoding()); 1576 #endif // _LP64 1577 __ jcc(Assembler::equal, *success_target); 1578 1579 __ push(klass_RInfo); 1580 #ifdef _LP64 1581 __ push(k_RInfo); 1582 #else 1583 __ pushklass(k->constant_encoding(), noreg); 1584 #endif // _LP64 1585 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1586 __ pop(klass_RInfo); 1587 __ pop(klass_RInfo); 1588 // result is a boolean 1589 __ testl(klass_RInfo, klass_RInfo); 1590 __ jcc(Assembler::equal, *failure_target); 1591 // successful cast, fall through to profile or jump 1592 } 1593 } else { 1594 // perform the fast part of the checking logic 1595 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1596 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1597 __ push(klass_RInfo); 1598 __ push(k_RInfo); 1599 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1600 __ pop(klass_RInfo); 1601 __ pop(k_RInfo); 1602 // result is a boolean 1603 __ testl(k_RInfo, k_RInfo); 1604 __ jcc(Assembler::equal, *failure_target); 1605 // successful cast, fall through to profile or jump 1606 } 1607 } 1608 __ jmp(*success); 1609 } 1610 1611 1612 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1613 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 1614 LIR_Code code = op->code(); 1615 if (code == lir_store_check) { 1616 Register value = op->object()->as_register(); 1617 Register array = op->array()->as_register(); 1618 Register k_RInfo = op->tmp1()->as_register(); 1619 Register klass_RInfo = op->tmp2()->as_register(); 1620 Register Rtmp1 = op->tmp3()->as_register(); 1621 1622 CodeStub* stub = op->stub(); 1623 1624 // check if it needs to be profiled 1625 ciMethodData* md = nullptr; 1626 ciProfileData* data = nullptr; 1627 1628 if (op->should_profile()) { 1629 ciMethod* method = op->profiled_method(); 1630 assert(method != nullptr, "Should have method"); 1631 int bci = op->profiled_bci(); 1632 md = method->method_data_or_null(); 1633 assert(md != nullptr, "Sanity"); 1634 data = md->bci_to_data(bci); 1635 assert(data != nullptr, "need data for type check"); 1636 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1637 } 1638 Label done; 1639 Label* success_target = &done; 1640 Label* failure_target = stub->entry(); 1641 1642 __ testptr(value, value); 1643 if (op->should_profile()) { 1644 Label not_null; 1645 Register mdo = klass_RInfo; 1646 __ mov_metadata(mdo, md->constant_encoding()); 1647 __ jccb(Assembler::notEqual, not_null); 1648 // Object is null; update MDO and exit 1649 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset())); 1650 int header_bits = BitData::null_seen_byte_constant(); 1651 __ orb(data_addr, header_bits); 1652 __ jmp(done); 1653 __ bind(not_null); 1654 1655 Label update_done; 1656 Register recv = k_RInfo; 
1657 __ load_klass(recv, value, tmp_load_klass); 1658 type_profile_helper(mdo, md, data, recv, &update_done); 1659 1660 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1661 __ addptr(counter_addr, DataLayout::counter_increment); 1662 __ bind(update_done); 1663 } else { 1664 __ jcc(Assembler::equal, done); 1665 } 1666 1667 add_debug_info_for_null_check_here(op->info_for_exception()); 1668 __ load_klass(k_RInfo, array, tmp_load_klass); 1669 __ load_klass(klass_RInfo, value, tmp_load_klass); 1670 1671 // get instance klass (it's already uncompressed) 1672 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1673 // perform the fast part of the checking logic 1674 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); 1675 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1676 __ push(klass_RInfo); 1677 __ push(k_RInfo); 1678 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); 1679 __ pop(klass_RInfo); 1680 __ pop(k_RInfo); 1681 // result is a boolean 1682 __ testl(k_RInfo, k_RInfo); 1683 __ jcc(Assembler::equal, *failure_target); 1684 // fall through to the success case 1685 1686 __ bind(done); 1687 } else 1688 if (code == lir_checkcast) { 1689 Register obj = op->object()->as_register(); 1690 Register dst = op->result_opr()->as_register(); 1691 Label success; 1692 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1693 __ bind(success); 1694 if (dst != obj) { 1695 __ mov(dst, obj); 1696 } 1697 } else 1698 if (code == lir_instanceof) { 1699 Register obj = op->object()->as_register(); 1700 Register dst = op->result_opr()->as_register(); 1701 Label success, failure, done; 1702 emit_typecheck_helper(op, &success, &failure, &failure); 1703 __ bind(failure); 1704 __ xorptr(dst, dst); 1705 __ jmpb(done); 1706 __ bind(success); 1707 __ movptr(dst, 1); 1708 __ bind(done); 1709 } else { 1710 ShouldNotReachHere(); 1711 } 1712 1713 } 1714 1715 1716 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 1717 if (LP64_ONLY(false &&) op->code() == lir_cas_long) { 1718 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 1719 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 1720 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 1721 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 1722 Register addr = op->addr()->as_register(); 1723 __ lock(); 1724 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 1725 1726 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 1727 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 1728 Register addr = (op->addr()->is_single_cpu() ? 
op->addr()->as_register() : op->addr()->as_register_lo()); 1729 Register newval = op->new_value()->as_register(); 1730 Register cmpval = op->cmp_value()->as_register(); 1731 assert(cmpval == rax, "wrong register"); 1732 assert(newval != noreg, "new val must be register"); 1733 assert(cmpval != newval, "cmp and new values must be in different registers"); 1734 assert(cmpval != addr, "cmp and addr must be in different registers"); 1735 assert(newval != addr, "new value and addr must be in different registers"); 1736 1737 if ( op->code() == lir_cas_obj) { 1738 #ifdef _LP64 1739 if (UseCompressedOops) { 1740 __ encode_heap_oop(cmpval); 1741 __ mov(rscratch1, newval); 1742 __ encode_heap_oop(rscratch1); 1743 __ lock(); 1744 // cmpval (rax) is implicitly used by this instruction 1745 __ cmpxchgl(rscratch1, Address(addr, 0)); 1746 } else 1747 #endif 1748 { 1749 __ lock(); 1750 __ cmpxchgptr(newval, Address(addr, 0)); 1751 } 1752 } else { 1753 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 1754 __ lock(); 1755 __ cmpxchgl(newval, Address(addr, 0)); 1756 } 1757 #ifdef _LP64 1758 } else if (op->code() == lir_cas_long) { 1759 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 1760 Register newval = op->new_value()->as_register_lo(); 1761 Register cmpval = op->cmp_value()->as_register_lo(); 1762 assert(cmpval == rax, "wrong register"); 1763 assert(newval != noreg, "new val must be register"); 1764 assert(cmpval != newval, "cmp and new values must be in different registers"); 1765 assert(cmpval != addr, "cmp and addr must be in different registers"); 1766 assert(newval != addr, "new value and addr must be in different registers"); 1767 __ lock(); 1768 __ cmpxchgq(newval, Address(addr, 0)); 1769 #endif // _LP64 1770 } else { 1771 Unimplemented(); 1772 } 1773 } 1774 1775 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type, 1776 LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) { 1777 assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86"); 1778 1779 Assembler::Condition acond, ncond; 1780 switch (condition) { 1781 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 1782 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 1783 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 1784 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 1785 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 1786 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 1787 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 1788 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 1789 default: acond = Assembler::equal; ncond = Assembler::notEqual; 1790 ShouldNotReachHere(); 1791 } 1792 1793 if (opr1->is_cpu_register()) { 1794 reg2reg(opr1, result); 1795 } else if (opr1->is_stack()) { 1796 stack2reg(opr1, result, result->type()); 1797 } else if (opr1->is_constant()) { 1798 const2reg(opr1, result, lir_patch_none, nullptr); 1799 } else { 1800 ShouldNotReachHere(); 1801 } 1802 1803 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 1804 // optimized version that does not require a branch 1805 if (opr2->is_single_cpu()) { 1806 assert(opr2->cpu_regnr() != result->cpu_regnr(), 
"opr2 already overwritten by previous move"); 1807 __ cmov(ncond, result->as_register(), opr2->as_register()); 1808 } else if (opr2->is_double_cpu()) { 1809 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1810 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1811 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 1812 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 1813 } else if (opr2->is_single_stack()) { 1814 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); 1815 } else if (opr2->is_double_stack()) { 1816 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 1817 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 1818 } else { 1819 ShouldNotReachHere(); 1820 } 1821 1822 } else { 1823 Label skip; 1824 __ jccb(acond, skip); 1825 if (opr2->is_cpu_register()) { 1826 reg2reg(opr2, result); 1827 } else if (opr2->is_stack()) { 1828 stack2reg(opr2, result, result->type()); 1829 } else if (opr2->is_constant()) { 1830 const2reg(opr2, result, lir_patch_none, nullptr); 1831 } else { 1832 ShouldNotReachHere(); 1833 } 1834 __ bind(skip); 1835 } 1836 } 1837 1838 1839 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info) { 1840 assert(info == nullptr, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1841 1842 if (left->is_single_cpu()) { 1843 assert(left == dest, "left and dest must be equal"); 1844 Register lreg = left->as_register(); 1845 1846 if (right->is_single_cpu()) { 1847 // cpu register - cpu register 1848 Register rreg = right->as_register(); 1849 switch (code) { 1850 case lir_add: __ addl (lreg, rreg); break; 1851 case lir_sub: __ subl (lreg, rreg); break; 1852 case lir_mul: __ imull(lreg, rreg); break; 1853 default: ShouldNotReachHere(); 1854 } 1855 1856 } else if (right->is_stack()) { 1857 // cpu register - stack 1858 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1859 switch (code) { 1860 case lir_add: __ addl(lreg, raddr); break; 1861 case lir_sub: __ subl(lreg, raddr); break; 1862 default: ShouldNotReachHere(); 1863 } 1864 1865 } else if (right->is_constant()) { 1866 // cpu register - constant 1867 jint c = right->as_constant_ptr()->as_jint(); 1868 switch (code) { 1869 case lir_add: { 1870 __ incrementl(lreg, c); 1871 break; 1872 } 1873 case lir_sub: { 1874 __ decrementl(lreg, c); 1875 break; 1876 } 1877 default: ShouldNotReachHere(); 1878 } 1879 1880 } else { 1881 ShouldNotReachHere(); 1882 } 1883 1884 } else if (left->is_double_cpu()) { 1885 assert(left == dest, "left and dest must be equal"); 1886 Register lreg_lo = left->as_register_lo(); 1887 Register lreg_hi = left->as_register_hi(); 1888 1889 if (right->is_double_cpu()) { 1890 // cpu register - cpu register 1891 Register rreg_lo = right->as_register_lo(); 1892 Register rreg_hi = right->as_register_hi(); 1893 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 1894 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 1895 switch (code) { 1896 case lir_add: 1897 __ addptr(lreg_lo, rreg_lo); 1898 NOT_LP64(__ adcl(lreg_hi, rreg_hi)); 1899 break; 1900 case 
lir_sub: 1901 __ subptr(lreg_lo, rreg_lo); 1902 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 1903 break; 1904 case lir_mul: 1905 #ifdef _LP64 1906 __ imulq(lreg_lo, rreg_lo); 1907 #else 1908 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 1909 __ imull(lreg_hi, rreg_lo); 1910 __ imull(rreg_hi, lreg_lo); 1911 __ addl (rreg_hi, lreg_hi); 1912 __ mull (rreg_lo); 1913 __ addl (lreg_hi, rreg_hi); 1914 #endif // _LP64 1915 break; 1916 default: 1917 ShouldNotReachHere(); 1918 } 1919 1920 } else if (right->is_constant()) { 1921 // cpu register - constant 1922 #ifdef _LP64 1923 jlong c = right->as_constant_ptr()->as_jlong_bits(); 1924 __ movptr(r10, (intptr_t) c); 1925 switch (code) { 1926 case lir_add: 1927 __ addptr(lreg_lo, r10); 1928 break; 1929 case lir_sub: 1930 __ subptr(lreg_lo, r10); 1931 break; 1932 default: 1933 ShouldNotReachHere(); 1934 } 1935 #else 1936 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 1937 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 1938 switch (code) { 1939 case lir_add: 1940 __ addptr(lreg_lo, c_lo); 1941 __ adcl(lreg_hi, c_hi); 1942 break; 1943 case lir_sub: 1944 __ subptr(lreg_lo, c_lo); 1945 __ sbbl(lreg_hi, c_hi); 1946 break; 1947 default: 1948 ShouldNotReachHere(); 1949 } 1950 #endif // _LP64 1951 1952 } else { 1953 ShouldNotReachHere(); 1954 } 1955 1956 } else if (left->is_single_xmm()) { 1957 assert(left == dest, "left and dest must be equal"); 1958 XMMRegister lreg = left->as_xmm_float_reg(); 1959 1960 if (right->is_single_xmm()) { 1961 XMMRegister rreg = right->as_xmm_float_reg(); 1962 switch (code) { 1963 case lir_add: __ addss(lreg, rreg); break; 1964 case lir_sub: __ subss(lreg, rreg); break; 1965 case lir_mul: __ mulss(lreg, rreg); break; 1966 case lir_div: __ divss(lreg, rreg); break; 1967 default: ShouldNotReachHere(); 1968 } 1969 } else { 1970 Address raddr; 1971 if (right->is_single_stack()) { 1972 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1973 } else if (right->is_constant()) { 1974 // hack for now 1975 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 1976 } else { 1977 ShouldNotReachHere(); 1978 } 1979 switch (code) { 1980 case lir_add: __ addss(lreg, raddr); break; 1981 case lir_sub: __ subss(lreg, raddr); break; 1982 case lir_mul: __ mulss(lreg, raddr); break; 1983 case lir_div: __ divss(lreg, raddr); break; 1984 default: ShouldNotReachHere(); 1985 } 1986 } 1987 1988 } else if (left->is_double_xmm()) { 1989 assert(left == dest, "left and dest must be equal"); 1990 1991 XMMRegister lreg = left->as_xmm_double_reg(); 1992 if (right->is_double_xmm()) { 1993 XMMRegister rreg = right->as_xmm_double_reg(); 1994 switch (code) { 1995 case lir_add: __ addsd(lreg, rreg); break; 1996 case lir_sub: __ subsd(lreg, rreg); break; 1997 case lir_mul: __ mulsd(lreg, rreg); break; 1998 case lir_div: __ divsd(lreg, rreg); break; 1999 default: ShouldNotReachHere(); 2000 } 2001 } else { 2002 Address raddr; 2003 if (right->is_double_stack()) { 2004 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2005 } else if (right->is_constant()) { 2006 // hack for now 2007 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2008 } else { 2009 ShouldNotReachHere(); 2010 } 2011 switch (code) { 2012 case lir_add: __ addsd(lreg, raddr); break; 2013 case lir_sub: __ subsd(lreg, raddr); break; 2014 case lir_mul: __ mulsd(lreg, raddr); break; 2015 case lir_div: __ divsd(lreg, raddr); break; 2016 default: ShouldNotReachHere(); 2017 } 2018 } 2019 2020 } else if (left->is_single_stack() || 
left->is_address()) { 2021 assert(left == dest, "left and dest must be equal"); 2022 2023 Address laddr; 2024 if (left->is_single_stack()) { 2025 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2026 } else if (left->is_address()) { 2027 laddr = as_Address(left->as_address_ptr()); 2028 } else { 2029 ShouldNotReachHere(); 2030 } 2031 2032 if (right->is_single_cpu()) { 2033 Register rreg = right->as_register(); 2034 switch (code) { 2035 case lir_add: __ addl(laddr, rreg); break; 2036 case lir_sub: __ subl(laddr, rreg); break; 2037 default: ShouldNotReachHere(); 2038 } 2039 } else if (right->is_constant()) { 2040 jint c = right->as_constant_ptr()->as_jint(); 2041 switch (code) { 2042 case lir_add: { 2043 __ incrementl(laddr, c); 2044 break; 2045 } 2046 case lir_sub: { 2047 __ decrementl(laddr, c); 2048 break; 2049 } 2050 default: ShouldNotReachHere(); 2051 } 2052 } else { 2053 ShouldNotReachHere(); 2054 } 2055 2056 } else { 2057 ShouldNotReachHere(); 2058 } 2059 } 2060 2061 2062 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr tmp, LIR_Opr dest, LIR_Op* op) { 2063 if (value->is_double_xmm()) { 2064 switch(code) { 2065 case lir_abs : 2066 { 2067 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2068 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2069 } 2070 assert(!tmp->is_valid(), "do not need temporary"); 2071 __ andpd(dest->as_xmm_double_reg(), 2072 ExternalAddress((address)double_signmask_pool), 2073 rscratch1); 2074 } 2075 break; 2076 2077 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2078 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2079 default : ShouldNotReachHere(); 2080 } 2081 2082 } else if (code == lir_f2hf) { 2083 __ flt_to_flt16(dest->as_register(), value->as_xmm_float_reg(), tmp->as_xmm_float_reg()); 2084 } else if (code == lir_hf2f) { 2085 __ flt16_to_flt(dest->as_xmm_float_reg(), value->as_register()); 2086 } else { 2087 Unimplemented(); 2088 } 2089 } 2090 2091 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2092 // assert(left->destroys_register(), "check"); 2093 if (left->is_single_cpu()) { 2094 Register reg = left->as_register(); 2095 if (right->is_constant()) { 2096 int val = right->as_constant_ptr()->as_jint(); 2097 switch (code) { 2098 case lir_logic_and: __ andl (reg, val); break; 2099 case lir_logic_or: __ orl (reg, val); break; 2100 case lir_logic_xor: __ xorl (reg, val); break; 2101 default: ShouldNotReachHere(); 2102 } 2103 } else if (right->is_stack()) { 2104 // added support for stack operands 2105 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2106 switch (code) { 2107 case lir_logic_and: __ andl (reg, raddr); break; 2108 case lir_logic_or: __ orl (reg, raddr); break; 2109 case lir_logic_xor: __ xorl (reg, raddr); break; 2110 default: ShouldNotReachHere(); 2111 } 2112 } else { 2113 Register rright = right->as_register(); 2114 switch (code) { 2115 case lir_logic_and: __ andptr (reg, rright); break; 2116 case lir_logic_or : __ orptr (reg, rright); break; 2117 case lir_logic_xor: __ xorptr (reg, rright); break; 2118 default: ShouldNotReachHere(); 2119 } 2120 } 2121 move_regs(reg, dst->as_register()); 2122 } else { 2123 Register l_lo = left->as_register_lo(); 2124 Register l_hi = left->as_register_hi(); 2125 if (right->is_constant()) { 2126 #ifdef _LP64 2127 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2128 switch (code) { 2129 case 
lir_logic_and: 2130 __ andq(l_lo, rscratch1); 2131 break; 2132 case lir_logic_or: 2133 __ orq(l_lo, rscratch1); 2134 break; 2135 case lir_logic_xor: 2136 __ xorq(l_lo, rscratch1); 2137 break; 2138 default: ShouldNotReachHere(); 2139 } 2140 #else 2141 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2142 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2143 switch (code) { 2144 case lir_logic_and: 2145 __ andl(l_lo, r_lo); 2146 __ andl(l_hi, r_hi); 2147 break; 2148 case lir_logic_or: 2149 __ orl(l_lo, r_lo); 2150 __ orl(l_hi, r_hi); 2151 break; 2152 case lir_logic_xor: 2153 __ xorl(l_lo, r_lo); 2154 __ xorl(l_hi, r_hi); 2155 break; 2156 default: ShouldNotReachHere(); 2157 } 2158 #endif // _LP64 2159 } else { 2160 #ifdef _LP64 2161 Register r_lo; 2162 if (is_reference_type(right->type())) { 2163 r_lo = right->as_register(); 2164 } else { 2165 r_lo = right->as_register_lo(); 2166 } 2167 #else 2168 Register r_lo = right->as_register_lo(); 2169 Register r_hi = right->as_register_hi(); 2170 assert(l_lo != r_hi, "overwriting registers"); 2171 #endif 2172 switch (code) { 2173 case lir_logic_and: 2174 __ andptr(l_lo, r_lo); 2175 NOT_LP64(__ andptr(l_hi, r_hi);) 2176 break; 2177 case lir_logic_or: 2178 __ orptr(l_lo, r_lo); 2179 NOT_LP64(__ orptr(l_hi, r_hi);) 2180 break; 2181 case lir_logic_xor: 2182 __ xorptr(l_lo, r_lo); 2183 NOT_LP64(__ xorptr(l_hi, r_hi);) 2184 break; 2185 default: ShouldNotReachHere(); 2186 } 2187 } 2188 2189 Register dst_lo = dst->as_register_lo(); 2190 Register dst_hi = dst->as_register_hi(); 2191 2192 #ifdef _LP64 2193 move_regs(l_lo, dst_lo); 2194 #else 2195 if (dst_lo == l_hi) { 2196 assert(dst_hi != l_lo, "overwriting registers"); 2197 move_regs(l_hi, dst_hi); 2198 move_regs(l_lo, dst_lo); 2199 } else { 2200 assert(dst_lo != l_hi, "overwriting registers"); 2201 move_regs(l_lo, dst_lo); 2202 move_regs(l_hi, dst_hi); 2203 } 2204 #endif // _LP64 2205 } 2206 } 2207 2208 2209 // we assume that rax, and rdx can be overwritten 2210 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2211 2212 assert(left->is_single_cpu(), "left must be register"); 2213 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2214 assert(result->is_single_cpu(), "result must be register"); 2215 2216 // assert(left->destroys_register(), "check"); 2217 // assert(right->destroys_register(), "check"); 2218 2219 Register lreg = left->as_register(); 2220 Register dreg = result->as_register(); 2221 2222 if (right->is_constant()) { 2223 jint divisor = right->as_constant_ptr()->as_jint(); 2224 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2225 if (code == lir_idiv) { 2226 assert(lreg == rax, "must be rax,"); 2227 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2228 __ cdql(); // sign extend into rdx:rax 2229 if (divisor == 2) { 2230 __ subl(lreg, rdx); 2231 } else { 2232 __ andl(rdx, divisor - 1); 2233 __ addl(lreg, rdx); 2234 } 2235 __ sarl(lreg, log2i_exact(divisor)); 2236 move_regs(lreg, dreg); 2237 } else if (code == lir_irem) { 2238 Label done; 2239 __ mov(dreg, lreg); 2240 __ andl(dreg, 0x80000000 | (divisor - 1)); 2241 __ jcc(Assembler::positive, done); 2242 __ decrement(dreg); 2243 __ orl(dreg, ~(divisor - 1)); 2244 __ increment(dreg); 2245 __ bind(done); 2246 } else { 2247 ShouldNotReachHere(); 2248 } 2249 } else { 2250 Register rreg = right->as_register(); 2251 assert(lreg == rax, "left register must be rax,"); 2252 assert(rreg != rdx, "right 
register must not be rdx"); 2253 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2254 2255 move_regs(lreg, rax); 2256 2257 int idivl_offset = __ corrected_idivl(rreg); 2258 if (ImplicitDiv0Checks) { 2259 add_debug_info_for_div0(idivl_offset, info); 2260 } 2261 if (code == lir_irem) { 2262 move_regs(rdx, dreg); // result is in rdx 2263 } else { 2264 move_regs(rax, dreg); 2265 } 2266 } 2267 } 2268 2269 2270 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2271 if (opr1->is_single_cpu()) { 2272 Register reg1 = opr1->as_register(); 2273 if (opr2->is_single_cpu()) { 2274 // cpu register - cpu register 2275 if (is_reference_type(opr1->type())) { 2276 __ cmpoop(reg1, opr2->as_register()); 2277 } else { 2278 assert(!is_reference_type(opr2->type()), "cmp int, oop?"); 2279 __ cmpl(reg1, opr2->as_register()); 2280 } 2281 } else if (opr2->is_stack()) { 2282 // cpu register - stack 2283 if (is_reference_type(opr1->type())) { 2284 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2285 } else { 2286 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2287 } 2288 } else if (opr2->is_constant()) { 2289 // cpu register - constant 2290 LIR_Const* c = opr2->as_constant_ptr(); 2291 if (c->type() == T_INT) { 2292 jint i = c->as_jint(); 2293 if (i == 0) { 2294 __ testl(reg1, reg1); 2295 } else { 2296 __ cmpl(reg1, i); 2297 } 2298 } else if (c->type() == T_METADATA) { 2299 // All we need for now is a comparison with null for equality. 2300 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops"); 2301 Metadata* m = c->as_metadata(); 2302 if (m == nullptr) { 2303 __ testptr(reg1, reg1); 2304 } else { 2305 ShouldNotReachHere(); 2306 } 2307 } else if (is_reference_type(c->type())) { 2308 // In 64bit oops are single register 2309 jobject o = c->as_jobject(); 2310 if (o == nullptr) { 2311 __ testptr(reg1, reg1); 2312 } else { 2313 __ cmpoop(reg1, o, rscratch1); 2314 } 2315 } else { 2316 fatal("unexpected type: %s", basictype_to_str(c->type())); 2317 } 2318 // cpu register - address 2319 } else if (opr2->is_address()) { 2320 if (op->info() != nullptr) { 2321 add_debug_info_for_null_check_here(op->info()); 2322 } 2323 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2324 } else { 2325 ShouldNotReachHere(); 2326 } 2327 2328 } else if(opr1->is_double_cpu()) { 2329 Register xlo = opr1->as_register_lo(); 2330 Register xhi = opr1->as_register_hi(); 2331 if (opr2->is_double_cpu()) { 2332 #ifdef _LP64 2333 __ cmpptr(xlo, opr2->as_register_lo()); 2334 #else 2335 // cpu register - cpu register 2336 Register ylo = opr2->as_register_lo(); 2337 Register yhi = opr2->as_register_hi(); 2338 __ subl(xlo, ylo); 2339 __ sbbl(xhi, yhi); 2340 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2341 __ orl(xhi, xlo); 2342 } 2343 #endif // _LP64 2344 } else if (opr2->is_constant()) { 2345 // cpu register - constant 0 2346 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2347 #ifdef _LP64 2348 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2349 #else 2350 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2351 __ orl(xhi, xlo); 2352 #endif // _LP64 2353 } else { 2354 ShouldNotReachHere(); 2355 } 2356 2357 } else if (opr1->is_single_xmm()) { 2358 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2359 if (opr2->is_single_xmm()) { 2360 // xmm register - xmm register 2361 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2362 } else if (opr2->is_stack()) { 2363 
// xmm register - stack 2364 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2365 } else if (opr2->is_constant()) { 2366 // xmm register - constant 2367 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2368 } else if (opr2->is_address()) { 2369 // xmm register - address 2370 if (op->info() != nullptr) { 2371 add_debug_info_for_null_check_here(op->info()); 2372 } 2373 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2374 } else { 2375 ShouldNotReachHere(); 2376 } 2377 2378 } else if (opr1->is_double_xmm()) { 2379 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2380 if (opr2->is_double_xmm()) { 2381 // xmm register - xmm register 2382 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2383 } else if (opr2->is_stack()) { 2384 // xmm register - stack 2385 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2386 } else if (opr2->is_constant()) { 2387 // xmm register - constant 2388 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2389 } else if (opr2->is_address()) { 2390 // xmm register - address 2391 if (op->info() != nullptr) { 2392 add_debug_info_for_null_check_here(op->info()); 2393 } 2394 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2395 } else { 2396 ShouldNotReachHere(); 2397 } 2398 2399 } else if (opr1->is_address() && opr2->is_constant()) { 2400 LIR_Const* c = opr2->as_constant_ptr(); 2401 #ifdef _LP64 2402 if (is_reference_type(c->type())) { 2403 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2404 __ movoop(rscratch1, c->as_jobject()); 2405 } 2406 #endif // LP64 2407 if (op->info() != nullptr) { 2408 add_debug_info_for_null_check_here(op->info()); 2409 } 2410 // special case: address - constant 2411 LIR_Address* addr = opr1->as_address_ptr(); 2412 if (c->type() == T_INT) { 2413 __ cmpl(as_Address(addr), c->as_jint()); 2414 } else if (is_reference_type(c->type())) { 2415 #ifdef _LP64 2416 // %%% Make this explode if addr isn't reachable until we figure out a 2417 // better strategy by giving noreg as the temp for as_Address 2418 __ cmpoop(rscratch1, as_Address(addr, noreg)); 2419 #else 2420 __ cmpoop(as_Address(addr), c->as_jobject()); 2421 #endif // _LP64 2422 } else { 2423 ShouldNotReachHere(); 2424 } 2425 2426 } else { 2427 ShouldNotReachHere(); 2428 } 2429 } 2430 2431 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2432 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2433 if (left->is_single_xmm()) { 2434 assert(right->is_single_xmm(), "must match"); 2435 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2436 } else if (left->is_double_xmm()) { 2437 assert(right->is_double_xmm(), "must match"); 2438 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2439 2440 } else { 2441 ShouldNotReachHere(); 2442 } 2443 } else { 2444 assert(code == lir_cmp_l2i, "check"); 2445 #ifdef _LP64 2446 Label done; 2447 Register dest = dst->as_register(); 2448 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 2449 __ movl(dest, -1); 2450 __ jccb(Assembler::less, done); 2451 __ setb(Assembler::notZero, dest); 2452 __ movzbl(dest, dest); 2453 __ bind(done); 2454 #else 2455 __ lcmp2int(left->as_register_hi(), 2456 left->as_register_lo(), 2457 right->as_register_hi(), 2458 right->as_register_lo()); 2459 move_regs(left->as_register_hi(), dst->as_register()); 2460 #endif // 
_LP64 2461 } 2462 } 2463 2464 2465 void LIR_Assembler::align_call(LIR_Code code) { 2466 // make sure that the displacement word of the call ends up word aligned 2467 int offset = __ offset(); 2468 switch (code) { 2469 case lir_static_call: 2470 case lir_optvirtual_call: 2471 case lir_dynamic_call: 2472 offset += NativeCall::displacement_offset; 2473 break; 2474 case lir_icvirtual_call: 2475 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size_rex; 2476 break; 2477 default: ShouldNotReachHere(); 2478 } 2479 __ align(BytesPerWord, offset); 2480 } 2481 2482 2483 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 2484 assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 2485 "must be aligned"); 2486 __ call(AddressLiteral(op->addr(), rtype)); 2487 add_call_info(code_offset(), op->info()); 2488 __ post_call_nop(); 2489 } 2490 2491 2492 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 2493 __ ic_call(op->addr()); 2494 add_call_info(code_offset(), op->info()); 2495 assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0, 2496 "must be aligned"); 2497 __ post_call_nop(); 2498 } 2499 2500 2501 void LIR_Assembler::emit_static_call_stub() { 2502 address call_pc = __ pc(); 2503 address stub = __ start_a_stub(call_stub_size()); 2504 if (stub == nullptr) { 2505 bailout("static call stub overflow"); 2506 return; 2507 } 2508 2509 int start = __ offset(); 2510 2511 // make sure that the displacement word of the call ends up word aligned 2512 __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size_rex + NativeCall::displacement_offset); 2513 __ relocate(static_stub_Relocation::spec(call_pc)); 2514 __ mov_metadata(rbx, (Metadata*)nullptr); 2515 // must be set to -1 at code generation time 2516 assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned"); 2517 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 2518 __ jump(RuntimeAddress(__ pc())); 2519 2520 assert(__ offset() - start <= call_stub_size(), "stub too big"); 2521 __ end_a_stub(); 2522 } 2523 2524 2525 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2526 assert(exceptionOop->as_register() == rax, "must match"); 2527 assert(exceptionPC->as_register() == rdx, "must match"); 2528 2529 // exception object is not added to oop map by LinearScan 2530 // (LinearScan assumes that no oops are in fixed registers) 2531 info->add_register_oop(exceptionOop); 2532 C1StubId unwind_id; 2533 2534 // get current pc information 2535 // pc is only needed if the method has an exception handler, the unwind code does not need it. 
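  // Note: the lea below materializes the current code address into exceptionPC (rdx);
  // together with the exception oop already in rax this forms the (exception oop,
  // throwing pc) pair expected by the handle_exception stubs selected further down.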
2536 int pc_for_athrow_offset = __ offset(); 2537 InternalAddress pc_for_athrow(__ pc()); 2538 __ lea(exceptionPC->as_register(), pc_for_athrow); 2539 add_call_info(pc_for_athrow_offset, info); // for exception handler 2540 2541 __ verify_not_null_oop(rax); 2542 // search an exception handler (rax: exception oop, rdx: throwing pc) 2543 if (compilation()->has_fpu_code()) { 2544 unwind_id = C1StubId::handle_exception_id; 2545 } else { 2546 unwind_id = C1StubId::handle_exception_nofpu_id; 2547 } 2548 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 2549 2550 // enough room for two byte trap 2551 __ nop(); 2552 } 2553 2554 2555 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2556 assert(exceptionOop->as_register() == rax, "must match"); 2557 2558 __ jmp(_unwind_handler_entry); 2559 } 2560 2561 2562 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2563 2564 // optimized version for linear scan: 2565 // * count must be already in ECX (guaranteed by LinearScan) 2566 // * left and dest must be equal 2567 // * tmp must be unused 2568 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 2569 assert(left == dest, "left and dest must be equal"); 2570 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2571 2572 if (left->is_single_cpu()) { 2573 Register value = left->as_register(); 2574 assert(value != SHIFT_count, "left cannot be ECX"); 2575 2576 switch (code) { 2577 case lir_shl: __ shll(value); break; 2578 case lir_shr: __ sarl(value); break; 2579 case lir_ushr: __ shrl(value); break; 2580 default: ShouldNotReachHere(); 2581 } 2582 } else if (left->is_double_cpu()) { 2583 Register lo = left->as_register_lo(); 2584 Register hi = left->as_register_hi(); 2585 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 2586 #ifdef _LP64 2587 switch (code) { 2588 case lir_shl: __ shlptr(lo); break; 2589 case lir_shr: __ sarptr(lo); break; 2590 case lir_ushr: __ shrptr(lo); break; 2591 default: ShouldNotReachHere(); 2592 } 2593 #else 2594 2595 switch (code) { 2596 case lir_shl: __ lshl(hi, lo); break; 2597 case lir_shr: __ lshr(hi, lo, true); break; 2598 case lir_ushr: __ lshr(hi, lo, false); break; 2599 default: ShouldNotReachHere(); 2600 } 2601 #endif // LP64 2602 } else { 2603 ShouldNotReachHere(); 2604 } 2605 } 2606 2607 2608 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2609 if (dest->is_single_cpu()) { 2610 // first move left into dest so that left is not destroyed by the shift 2611 Register value = dest->as_register(); 2612 count = count & 0x1F; // Java spec 2613 2614 move_regs(left->as_register(), value); 2615 switch (code) { 2616 case lir_shl: __ shll(value, count); break; 2617 case lir_shr: __ sarl(value, count); break; 2618 case lir_ushr: __ shrl(value, count); break; 2619 default: ShouldNotReachHere(); 2620 } 2621 } else if (dest->is_double_cpu()) { 2622 #ifndef _LP64 2623 Unimplemented(); 2624 #else 2625 // first move left into dest so that left is not destroyed by the shift 2626 Register value = dest->as_register_lo(); 2627 count = count & 0x1F; // Java spec 2628 2629 move_regs(left->as_register_lo(), value); 2630 switch (code) { 2631 case lir_shl: __ shlptr(value, count); break; 2632 case lir_shr: __ sarptr(value, count); break; 2633 case lir_ushr: __ shrptr(value, count); break; 2634 default: ShouldNotReachHere(); 2635 } 2636 #endif // _LP64 2637 } else { 2638 ShouldNotReachHere(); 2639 } 2640 } 2641 2642 2643 void 
LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) { 2644 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2645 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2646 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2647 __ movptr (Address(rsp, offset_from_rsp_in_bytes), r); 2648 } 2649 2650 2651 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) { 2652 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2653 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2654 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2655 __ movptr (Address(rsp, offset_from_rsp_in_bytes), c); 2656 } 2657 2658 2659 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) { 2660 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2661 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2662 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2663 __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1); 2664 } 2665 2666 2667 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) { 2668 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp"); 2669 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord; 2670 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2671 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1); 2672 } 2673 2674 2675 // This code replaces a call to arraycopy; no exception may 2676 // be thrown in this code, they must be thrown in the System.arraycopy 2677 // activation frame; we could save some checks if this would not be the case 2678 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 2679 ciArrayKlass* default_type = op->expected_type(); 2680 Register src = op->src()->as_register(); 2681 Register dst = op->dst()->as_register(); 2682 Register src_pos = op->src_pos()->as_register(); 2683 Register dst_pos = op->dst_pos()->as_register(); 2684 Register length = op->length()->as_register(); 2685 Register tmp = op->tmp()->as_register(); 2686 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 2687 Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg; 2688 2689 CodeStub* stub = op->stub(); 2690 int flags = op->flags(); 2691 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL; 2692 if (is_reference_type(basic_type)) basic_type = T_OBJECT; 2693 2694 // if we don't know anything, just go through the generic arraycopy 2695 if (default_type == nullptr) { 2696 // save outgoing arguments on stack in case call to System.arraycopy is needed 2697 // HACK ALERT. This code used to push the parameters in a hardwired fashion 2698 // for interpreter calling conventions. Now we have to do it in new style conventions. 2699 // For the moment until C1 gets the new register allocator I just force all the 2700 // args to the right place (except the register args) and then on the back side 2701 // reload the register args properly if we go slow path. 
Yuck
2702
2703 // These are proper for the calling convention
2704 store_parameter(length, 2);
2705 store_parameter(dst_pos, 1);
2706 store_parameter(dst, 0);
2707
2708 // these are just temporary placements until we need to reload
2709 store_parameter(src_pos, 3);
2710 store_parameter(src, 4);
2711 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
2712
2713 address copyfunc_addr = StubRoutines::generic_arraycopy();
2714 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2715
2716 // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
2717 #ifdef _LP64
2718 // The arguments are in the Java calling convention, so we can trivially shift them to the C
2719 // convention
2720 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2721 __ mov(c_rarg0, j_rarg0);
2722 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2723 __ mov(c_rarg1, j_rarg1);
2724 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2725 __ mov(c_rarg2, j_rarg2);
2726 assert_different_registers(c_rarg3, j_rarg4);
2727 __ mov(c_rarg3, j_rarg3);
2728 #ifdef _WIN64
2729 // Allocate ABI space for the args, but be sure to keep the stack aligned
2730 __ subptr(rsp, 6*wordSize);
2731 store_parameter(j_rarg4, 4);
2732 #ifndef PRODUCT
2733 if (PrintC1Statistics) {
2734 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2735 }
2736 #endif
2737 __ call(RuntimeAddress(copyfunc_addr));
2738 __ addptr(rsp, 6*wordSize);
2739 #else
2740 __ mov(c_rarg4, j_rarg4);
2741 #ifndef PRODUCT
2742 if (PrintC1Statistics) {
2743 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2744 }
2745 #endif
2746 __ call(RuntimeAddress(copyfunc_addr));
2747 #endif // _WIN64
2748 #else
2749 __ push(length);
2750 __ push(dst_pos);
2751 __ push(dst);
2752 __ push(src_pos);
2753 __ push(src);
2754
2755 #ifndef PRODUCT
2756 if (PrintC1Statistics) {
2757 __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt), rscratch1);
2758 }
2759 #endif
2760 __ call_VM_leaf(copyfunc_addr, 5); // removes the pushed parameters from the stack
2761
2762 #endif // _LP64
2763
2764 __ testl(rax, rax);
2765 __ jcc(Assembler::equal, *stub->continuation());
2766
2767 __ mov(tmp, rax);
2768 __ xorl(tmp, -1);
2769
2770 // Reload values from the stack so they are where the stub
2771 // expects them.
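    // Note (informal): the generic stub returns 0 when everything was copied, or ~N
    // when it stopped after N elements, so tmp now holds that N. The adjustments
    // below shrink length and advance src_pos/dst_pos so that the slow-path stub
    // only has to deal with the uncopied tail.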
2772 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 2773 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 2774 __ movptr (length, Address(rsp, 2*BytesPerWord)); 2775 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 2776 __ movptr (src, Address(rsp, 4*BytesPerWord)); 2777 2778 __ subl(length, tmp); 2779 __ addl(src_pos, tmp); 2780 __ addl(dst_pos, tmp); 2781 __ jmp(*stub->entry()); 2782 2783 __ bind(*stub->continuation()); 2784 return; 2785 } 2786 2787 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 2788 2789 int elem_size = type2aelembytes(basic_type); 2790 Address::ScaleFactor scale; 2791 2792 switch (elem_size) { 2793 case 1 : 2794 scale = Address::times_1; 2795 break; 2796 case 2 : 2797 scale = Address::times_2; 2798 break; 2799 case 4 : 2800 scale = Address::times_4; 2801 break; 2802 case 8 : 2803 scale = Address::times_8; 2804 break; 2805 default: 2806 scale = Address::no_scale; 2807 ShouldNotReachHere(); 2808 } 2809 2810 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 2811 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 2812 2813 // length and pos's are all sign extended at this point on 64bit 2814 2815 // test for null 2816 if (flags & LIR_OpArrayCopy::src_null_check) { 2817 __ testptr(src, src); 2818 __ jcc(Assembler::zero, *stub->entry()); 2819 } 2820 if (flags & LIR_OpArrayCopy::dst_null_check) { 2821 __ testptr(dst, dst); 2822 __ jcc(Assembler::zero, *stub->entry()); 2823 } 2824 2825 // If the compiler was not able to prove that exact type of the source or the destination 2826 // of the arraycopy is an array type, check at runtime if the source or the destination is 2827 // an instance type. 2828 if (flags & LIR_OpArrayCopy::type_check) { 2829 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2830 __ load_klass(tmp, dst, tmp_load_klass); 2831 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 2832 __ jcc(Assembler::greaterEqual, *stub->entry()); 2833 } 2834 2835 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2836 __ load_klass(tmp, src, tmp_load_klass); 2837 __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value); 2838 __ jcc(Assembler::greaterEqual, *stub->entry()); 2839 } 2840 } 2841 2842 // check if negative 2843 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2844 __ testl(src_pos, src_pos); 2845 __ jcc(Assembler::less, *stub->entry()); 2846 } 2847 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2848 __ testl(dst_pos, dst_pos); 2849 __ jcc(Assembler::less, *stub->entry()); 2850 } 2851 2852 if (flags & LIR_OpArrayCopy::src_range_check) { 2853 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); 2854 __ cmpl(tmp, src_length_addr); 2855 __ jcc(Assembler::above, *stub->entry()); 2856 } 2857 if (flags & LIR_OpArrayCopy::dst_range_check) { 2858 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); 2859 __ cmpl(tmp, dst_length_addr); 2860 __ jcc(Assembler::above, *stub->entry()); 2861 } 2862 2863 if (flags & LIR_OpArrayCopy::length_positive_check) { 2864 __ testl(length, length); 2865 __ jcc(Assembler::less, *stub->entry()); 2866 } 2867 2868 #ifdef _LP64 2869 __ movl2ptr(src_pos, src_pos); //higher 32bits must be null 2870 __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null 2871 #endif 2872 2873 if (flags & LIR_OpArrayCopy::type_check) { 2874 // We don't know the array types are compatible 2875 if (basic_type != T_OBJECT) { 2876 // 
Simple test for basic type arrays
2877 __ cmp_klasses_from_objects(src, dst, tmp, tmp2);
2878 __ jcc(Assembler::notEqual, *stub->entry());
2879 } else {
2880 // For object arrays, if src is a sub class of dst then we can
2881 // safely do the copy.
2882 Label cont, slow;
2883
2884 __ push(src);
2885 __ push(dst);
2886
2887 __ load_klass(src, src, tmp_load_klass);
2888 __ load_klass(dst, dst, tmp_load_klass);
2889
2890 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
2891
2892 __ push(src);
2893 __ push(dst);
2894 __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
2895 __ pop(dst);
2896 __ pop(src);
2897
2898 __ testl(src, src);
2899 __ jcc(Assembler::notEqual, cont);
2900
2901 __ bind(slow);
2902 __ pop(dst);
2903 __ pop(src);
2904
2905 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2906 if (copyfunc_addr != nullptr) { // use stub if available
2907 // src is not a sub class of dst so we have to do a
2908 // per-element check.
2909
2910 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2911 if ((flags & mask) != mask) {
2912 // Check that both of them are object arrays; verify at runtime the one not known statically.
2913 assert(flags & mask, "one of the two should be known to be an object array");
2914
2915 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2916 __ load_klass(tmp, src, tmp_load_klass);
2917 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2918 __ load_klass(tmp, dst, tmp_load_klass);
2919 }
2920 int lh_offset = in_bytes(Klass::layout_helper_offset());
2921 Address klass_lh_addr(tmp, lh_offset);
2922 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2923 __ cmpl(klass_lh_addr, objArray_lh);
2924 __ jcc(Assembler::notEqual, *stub->entry());
2925 }
2926
2927 // Spill because stubs can use any register they like and it's
2928 // easier to restore just those that we care about.
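        // Rough sketch of the call being assembled below (LP64 path):
        //   checkcast_arraycopy(src + src_pos*scale + header,        // c_rarg0
        //                       dst + dst_pos*scale + header,        // c_rarg1
        //                       length,                              // c_rarg2
        //                       element_klass->super_check_offset,   // c_rarg3
        //                       element_klass)                       // c_rarg4 (stack slot on Win64)
        // Like the generic stub it returns 0 on success, or ~N after copying N elements.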
2929 store_parameter(dst, 0); 2930 store_parameter(dst_pos, 1); 2931 store_parameter(length, 2); 2932 store_parameter(src_pos, 3); 2933 store_parameter(src, 4); 2934 2935 #ifndef _LP64 2936 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 2937 __ movptr(tmp, dst_klass_addr); 2938 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); 2939 __ push(tmp); 2940 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset())); 2941 __ push(tmp); 2942 __ push(length); 2943 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 2944 __ push(tmp); 2945 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 2946 __ push(tmp); 2947 2948 __ call_VM_leaf(copyfunc_addr, 5); 2949 #else 2950 __ movl2ptr(length, length); //higher 32bits must be null 2951 2952 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 2953 assert_different_registers(c_rarg0, dst, dst_pos, length); 2954 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 2955 assert_different_registers(c_rarg1, dst, length); 2956 2957 __ mov(c_rarg2, length); 2958 assert_different_registers(c_rarg2, dst); 2959 2960 #ifdef _WIN64 2961 // Allocate abi space for args but be sure to keep stack aligned 2962 __ subptr(rsp, 6*wordSize); 2963 __ load_klass(c_rarg3, dst, tmp_load_klass); 2964 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); 2965 store_parameter(c_rarg3, 4); 2966 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); 2967 __ call(RuntimeAddress(copyfunc_addr)); 2968 __ addptr(rsp, 6*wordSize); 2969 #else 2970 __ load_klass(c_rarg4, dst, tmp_load_klass); 2971 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 2972 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 2973 __ call(RuntimeAddress(copyfunc_addr)); 2974 #endif 2975 2976 #endif 2977 2978 #ifndef PRODUCT 2979 if (PrintC1Statistics) { 2980 Label failed; 2981 __ testl(rax, rax); 2982 __ jcc(Assembler::notZero, failed); 2983 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt), rscratch1); 2984 __ bind(failed); 2985 } 2986 #endif 2987 2988 __ testl(rax, rax); 2989 __ jcc(Assembler::zero, *stub->continuation()); 2990 2991 #ifndef PRODUCT 2992 if (PrintC1Statistics) { 2993 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt), rscratch1); 2994 } 2995 #endif 2996 2997 __ mov(tmp, rax); 2998 2999 __ xorl(tmp, -1); 3000 3001 // Restore previously spilled arguments 3002 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3003 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3004 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3005 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3006 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3007 3008 3009 __ subl(length, tmp); 3010 __ addl(src_pos, tmp); 3011 __ addl(dst_pos, tmp); 3012 } 3013 3014 __ jmp(*stub->entry()); 3015 3016 __ bind(cont); 3017 __ pop(dst); 3018 __ pop(src); 3019 } 3020 } 3021 3022 #ifdef ASSERT 3023 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 3024 // Sanity check the known type with the incoming class. For the 3025 // primitive case the types must match exactly with src.klass and 3026 // dst.klass each exactly matching the default type. 
For the 3027 // object array case, if no type check is needed then either the 3028 // dst type is exactly the expected type and the src type is a 3029 // subtype which we can't check or src is the same array as dst 3030 // but not necessarily exactly of type default_type. 3031 Label known_ok, halt; 3032 __ mov_metadata(tmp, default_type->constant_encoding()); 3033 #ifdef _LP64 3034 if (UseCompressedClassPointers) { 3035 __ encode_klass_not_null(tmp, rscratch1); 3036 } 3037 #endif 3038 3039 if (basic_type != T_OBJECT) { 3040 __ cmp_klass(tmp, dst, tmp2); 3041 __ jcc(Assembler::notEqual, halt); 3042 __ cmp_klass(tmp, src, tmp2); 3043 __ jcc(Assembler::equal, known_ok); 3044 } else { 3045 __ cmp_klass(tmp, dst, tmp2); 3046 __ jcc(Assembler::equal, known_ok); 3047 __ cmpptr(src, dst); 3048 __ jcc(Assembler::equal, known_ok); 3049 } 3050 __ bind(halt); 3051 __ stop("incorrect type information in arraycopy"); 3052 __ bind(known_ok); 3053 } 3054 #endif 3055 3056 #ifndef PRODUCT 3057 if (PrintC1Statistics) { 3058 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1); 3059 } 3060 #endif 3061 3062 #ifdef _LP64 3063 assert_different_registers(c_rarg0, dst, dst_pos, length); 3064 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3065 assert_different_registers(c_rarg1, length); 3066 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3067 __ mov(c_rarg2, length); 3068 3069 #else 3070 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3071 store_parameter(tmp, 0); 3072 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3073 store_parameter(tmp, 1); 3074 store_parameter(length, 2); 3075 #endif // _LP64 3076 3077 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 3078 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 3079 const char *name; 3080 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 3081 __ call_VM_leaf(entry, 0); 3082 3083 if (stub != nullptr) { 3084 __ bind(*stub->continuation()); 3085 } 3086 } 3087 3088 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3089 assert(op->crc()->is_single_cpu(), "crc must be register"); 3090 assert(op->val()->is_single_cpu(), "byte value must be register"); 3091 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3092 Register crc = op->crc()->as_register(); 3093 Register val = op->val()->as_register(); 3094 Register res = op->result_opr()->as_register(); 3095 3096 assert_different_registers(val, crc, res); 3097 3098 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 3099 __ notl(crc); // ~crc 3100 __ update_byte_crc32(crc, val, res); 3101 __ notl(crc); // ~crc 3102 __ mov(res, crc); 3103 } 3104 3105 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 3106 Register obj = op->obj_opr()->as_register(); // may not be an oop 3107 Register hdr = op->hdr_opr()->as_register(); 3108 Register lock = op->lock_opr()->as_register(); 3109 if (LockingMode == LM_MONITOR) { 3110 if (op->info() != nullptr) { 3111 add_debug_info_for_null_check_here(op->info()); 3112 __ null_check(obj); 3113 } 3114 __ jmp(*op->stub()->entry()); 3115 } else if (op->code() == lir_lock) { 3116 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3117 Register tmp = LockingMode == LM_LIGHTWEIGHT ? 
op->scratch_opr()->as_register() : noreg; 3118 // add debug info for NullPointerException only if one is possible 3119 int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); 3120 if (op->info() != nullptr) { 3121 add_debug_info_for_null_check(null_check_offset, op->info()); 3122 } 3123 // done 3124 } else if (op->code() == lir_unlock) { 3125 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3126 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3127 } else { 3128 Unimplemented(); 3129 } 3130 __ bind(*op->stub()->continuation()); 3131 } 3132 3133 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { 3134 Register obj = op->obj()->as_pointer_register(); 3135 Register result = op->result_opr()->as_pointer_register(); 3136 3137 CodeEmitInfo* info = op->info(); 3138 if (info != nullptr) { 3139 add_debug_info_for_null_check_here(info); 3140 } 3141 3142 __ load_klass(result, obj, rscratch1); 3143 } 3144 3145 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3146 ciMethod* method = op->profiled_method(); 3147 int bci = op->profiled_bci(); 3148 ciMethod* callee = op->profiled_callee(); 3149 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3150 3151 // Update counter for all call types 3152 ciMethodData* md = method->method_data_or_null(); 3153 assert(md != nullptr, "Sanity"); 3154 ciProfileData* data = md->bci_to_data(bci); 3155 assert(data != nullptr && data->is_CounterData(), "need CounterData for calls"); 3156 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3157 Register mdo = op->mdo()->as_register(); 3158 __ mov_metadata(mdo, md->constant_encoding()); 3159 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3160 // Perform additional virtual call profiling for invokevirtual and 3161 // invokeinterface bytecodes 3162 if (op->should_profile_receiver_type()) { 3163 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3164 Register recv = op->recv()->as_register(); 3165 assert_different_registers(mdo, recv); 3166 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3167 ciKlass* known_klass = op->known_holder(); 3168 if (C1OptimizeVirtualCallProfiling && known_klass != nullptr) { 3169 // We know the type that will be seen at this call site; we can 3170 // statically update the MethodData* rather than needing to do 3171 // dynamic tests on the receiver type 3172 3173 // NOTE: we should probably put a lock around this search to 3174 // avoid collisions by concurrent compilations 3175 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3176 uint i; 3177 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3178 ciKlass* receiver = vc_data->receiver(i); 3179 if (known_klass->equals(receiver)) { 3180 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3181 __ addptr(data_addr, DataLayout::counter_increment); 3182 return; 3183 } 3184 } 3185 3186 // Receiver type not found in profile data; select an empty slot 3187 3188 // Note that this is less efficient than it should be because it 3189 // always does a write to the receiver part of the 3190 // VirtualCallData rather than just the first time 3191 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3192 ciKlass* receiver = vc_data->receiver(i); 3193 if (receiver == nullptr) { 3194 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3195 __ 
mov_metadata(recv_addr, known_klass->constant_encoding(), rscratch1); 3196 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3197 __ addptr(data_addr, DataLayout::counter_increment); 3198 return; 3199 } 3200 } 3201 } else { 3202 __ load_klass(recv, recv, tmp_load_klass); 3203 Label update_done; 3204 type_profile_helper(mdo, md, data, recv, &update_done); 3205 // Receiver did not match any saved receiver and there is no empty row for it. 3206 // Increment total counter to indicate polymorphic case. 3207 __ addptr(counter_addr, DataLayout::counter_increment); 3208 3209 __ bind(update_done); 3210 } 3211 } else { 3212 // Static call 3213 __ addptr(counter_addr, DataLayout::counter_increment); 3214 } 3215 } 3216 3217 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 3218 Register obj = op->obj()->as_register(); 3219 Register tmp = op->tmp()->as_pointer_register(); 3220 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); 3221 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 3222 ciKlass* exact_klass = op->exact_klass(); 3223 intptr_t current_klass = op->current_klass(); 3224 bool not_null = op->not_null(); 3225 bool no_conflict = op->no_conflict(); 3226 3227 Label update, next, none; 3228 3229 bool do_null = !not_null; 3230 bool exact_klass_set = exact_klass != nullptr && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 3231 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 3232 3233 assert(do_null || do_update, "why are we here?"); 3234 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 3235 3236 __ verify_oop(obj); 3237 3238 #ifdef ASSERT 3239 if (obj == tmp) { 3240 #ifdef _LP64 3241 assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index()); 3242 #else 3243 assert_different_registers(obj, mdo_addr.base(), mdo_addr.index()); 3244 #endif 3245 } else { 3246 #ifdef _LP64 3247 assert_different_registers(obj, tmp, rscratch1, mdo_addr.base(), mdo_addr.index()); 3248 #else 3249 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index()); 3250 #endif 3251 } 3252 #endif 3253 if (do_null) { 3254 __ testptr(obj, obj); 3255 __ jccb(Assembler::notZero, update); 3256 if (!TypeEntries::was_null_seen(current_klass)) { 3257 __ testptr(mdo_addr, TypeEntries::null_seen); 3258 #ifndef ASSERT 3259 __ jccb(Assembler::notZero, next); // already set 3260 #else 3261 __ jcc(Assembler::notZero, next); // already set 3262 #endif 3263 // atomic update to prevent overwriting Klass* with 0 3264 __ lock(); 3265 __ orptr(mdo_addr, TypeEntries::null_seen); 3266 } 3267 if (do_update) { 3268 #ifndef ASSERT 3269 __ jmpb(next); 3270 } 3271 #else 3272 __ jmp(next); 3273 } 3274 } else { 3275 __ testptr(obj, obj); 3276 __ jcc(Assembler::notZero, update); 3277 __ stop("unexpected null obj"); 3278 #endif 3279 } 3280 3281 __ bind(update); 3282 3283 if (do_update) { 3284 #ifdef ASSERT 3285 if (exact_klass != nullptr) { 3286 Label ok; 3287 __ load_klass(tmp, obj, tmp_load_klass); 3288 __ push(tmp); 3289 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3290 __ cmpptr(tmp, Address(rsp, 0)); 3291 __ jcc(Assembler::equal, ok); 3292 __ stop("exact klass and actual klass differ"); 3293 __ bind(ok); 3294 __ pop(tmp); 3295 } 3296 #endif 3297 if (!no_conflict) { 3298 if (exact_klass == nullptr || TypeEntries::is_type_none(current_klass)) { 3299 if (exact_klass != nullptr) { 3300 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3301 } else { 
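          // No statically known type here, so read the receiver's klass at runtime
          // before comparing it against what the MDO cell records.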
3302 __ load_klass(tmp, obj, tmp_load_klass); 3303 } 3304 #ifdef _LP64 3305 __ mov(rscratch1, tmp); // save original value before XOR 3306 #endif 3307 __ xorptr(tmp, mdo_addr); 3308 __ testptr(tmp, TypeEntries::type_klass_mask); 3309 // klass seen before, nothing to do. The unknown bit may have been 3310 // set already but no need to check. 3311 __ jccb(Assembler::zero, next); 3312 3313 __ testptr(tmp, TypeEntries::type_unknown); 3314 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3315 3316 if (TypeEntries::is_type_none(current_klass)) { 3317 __ testptr(mdo_addr, TypeEntries::type_mask); 3318 __ jccb(Assembler::zero, none); 3319 #ifdef _LP64 3320 // There is a chance that the checks above (re-reading profiling 3321 // data from memory) fail if another thread has just set the 3322 // profiling to this obj's klass 3323 __ mov(tmp, rscratch1); // get back original value before XOR 3324 __ xorptr(tmp, mdo_addr); 3325 __ testptr(tmp, TypeEntries::type_klass_mask); 3326 __ jccb(Assembler::zero, next); 3327 #endif 3328 } 3329 } else { 3330 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3331 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3332 3333 __ testptr(mdo_addr, TypeEntries::type_unknown); 3334 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 3335 } 3336 3337 // different than before. Cannot keep accurate profile. 3338 __ orptr(mdo_addr, TypeEntries::type_unknown); 3339 3340 if (TypeEntries::is_type_none(current_klass)) { 3341 __ jmpb(next); 3342 3343 __ bind(none); 3344 // first time here. Set profile type. 3345 __ movptr(mdo_addr, tmp); 3346 #ifdef ASSERT 3347 __ andptr(tmp, TypeEntries::type_klass_mask); 3348 __ verify_klass_ptr(tmp); 3349 #endif 3350 } 3351 } else { 3352 // There's a single possible klass at this profile point 3353 assert(exact_klass != nullptr, "should be"); 3354 if (TypeEntries::is_type_none(current_klass)) { 3355 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3356 __ xorptr(tmp, mdo_addr); 3357 __ testptr(tmp, TypeEntries::type_klass_mask); 3358 #ifdef ASSERT 3359 __ jcc(Assembler::zero, next); 3360 3361 { 3362 Label ok; 3363 __ push(tmp); 3364 __ testptr(mdo_addr, TypeEntries::type_mask); 3365 __ jcc(Assembler::zero, ok); 3366 // may have been set by another thread 3367 __ mov_metadata(tmp, exact_klass->constant_encoding()); 3368 __ xorptr(tmp, mdo_addr); 3369 __ testptr(tmp, TypeEntries::type_mask); 3370 __ jcc(Assembler::zero, ok); 3371 3372 __ stop("unexpected profiling mismatch"); 3373 __ bind(ok); 3374 __ pop(tmp); 3375 } 3376 #else 3377 __ jccb(Assembler::zero, next); 3378 #endif 3379 // first time here. Set profile type. 3380 __ movptr(mdo_addr, tmp); 3381 #ifdef ASSERT 3382 __ andptr(tmp, TypeEntries::type_klass_mask); 3383 __ verify_klass_ptr(tmp); 3384 #endif 3385 } else { 3386 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr && 3387 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3388 3389 __ testptr(mdo_addr, TypeEntries::type_unknown); 3390 __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore. 
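        // Note (informal): throughout emit_profile_type the MDO cell packs a Klass*
        // together with low status bits (TypeEntries::null_seen, TypeEntries::type_unknown);
        // equality of the recorded klass is therefore tested via
        //   ((cell ^ klass) & TypeEntries::type_klass_mask) == 0,
        // and setting type_unknown below marks the site as polymorphic for good.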
        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }
  }
  __ bind(next);
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    assert(!tmp->is_valid(), "do not need temporary");
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool),
             rscratch1);
  } else if (dest->is_double_xmm()) {
    assert(!tmp->is_valid(), "do not need temporary");
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool),
             rscratch1);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  PatchingStub* patch = nullptr;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  Register reg = dest->as_pointer_register();
  LIR_Address* addr = src->as_address_ptr();
  __ lea(reg, as_Address(addr));

  if (patch != nullptr) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != nullptr) {
    add_call_info_here(info);
  }
  __ post_call_nop();
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != nullptr) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
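      // After the logical shift right by 32, the upper half of the long sits
      // in the low dword of the XMM register; move it into the hi register.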
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  // x86 is TSO, so a StoreLoad fence is sufficient for a full barrier.
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::on_spin_wait() {
  __ pause();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

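// atomic_op emits LIR xchg/xadd. Note that xchg with a memory operand is
// implicitly locked on x86, so only the xadd forms need an explicit lock()
// prefix; compressed oops are encoded before and decoded after the exchange.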
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      __ lock();
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      __ lock();
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __