src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

   1 /*
   2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"

  35 #include "gc/shared/barrierSet.hpp"
  36 #include "gc/shared/cardTableBarrierSet.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "nativeInst_x86.hpp"
  39 #include "oops/objArrayKlass.hpp"
  40 #include "runtime/frame.inline.hpp"
  41 #include "runtime/safepointMechanism.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "vmreg_x86.inline.hpp"
  44 
  45 
  46 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  47 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  48 // fast versions of NegF/NegD and AbsF/AbsD.
  49 
  50 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  51 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  52   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  53   // for the 128-bit operands of SSE instructions.
  54   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
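
A minimal sketch of the round-down trick used above (illustrative names, not
the VM's actual pool layout): given at least 8 bytes of slack in front of the
requested slot, masking off the low four address bits stays inside the pool.

    #include <cstdint>
    static int64_t pool[4];                  // 32 bytes of raw storage
    int64_t* aligned_pair() {
      // &pool[1] is 8 bytes into the pool, so rounding its address down to
      // a 16-byte boundary lands inside the pool wherever the pool starts.
      uintptr_t p = reinterpret_cast<uintptr_t>(&pool[1]);
      return reinterpret_cast<int64_t*>(p & ~uintptr_t(0xF));
    }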


 182 }
 183 
 184 void LIR_Assembler::ffree(int i) {
 185   __ ffree(i);
 186 }
 187 
 188 void LIR_Assembler::breakpoint() {
 189   __ int3();
 190 }
 191 
 192 void LIR_Assembler::push(LIR_Opr opr) {
 193   if (opr->is_single_cpu()) {
 194     __ push_reg(opr->as_register());
 195   } else if (opr->is_double_cpu()) {
 196     NOT_LP64(__ push_reg(opr->as_register_hi()));
 197     __ push_reg(opr->as_register_lo());
 198   } else if (opr->is_stack()) {
 199     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 200   } else if (opr->is_constant()) {
 201     LIR_Const* const_opr = opr->as_constant_ptr();
 202     if (const_opr->type() == T_OBJECT) {
 203       __ push_oop(const_opr->as_jobject());
 204     } else if (const_opr->type() == T_INT) {
 205       __ push_jint(const_opr->as_jint());
 206     } else {
 207       ShouldNotReachHere();
 208     }
 209 
 210   } else {
 211     ShouldNotReachHere();
 212   }
 213 }
 214 
 215 void LIR_Assembler::pop(LIR_Opr opr) {
 216   if (opr->is_single_cpu()) {
 217     __ pop_reg(opr->as_register());
 218   } else {
 219     ShouldNotReachHere();
 220   }
 221 }
 222 


 272   return as_Address(addr);
 273 }
 274 
 275 
 276 void LIR_Assembler::osr_entry() {
 277   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 278   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 279   ValueStack* entry_state = osr_entry->state();
 280   int number_of_locks = entry_state->locks_size();
 281 
 282   // we jump here if osr happens with the interpreter
 283   // state set up to continue at the beginning of the
 284   // loop that triggered osr - in particular, we have
 285   // the following registers setup:
 286   //
 287   // rcx: osr buffer
 288   //
 289 
 290   // build frame
 291   ciMethod* m = compilation()->method();
 292   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

 293 
 294   // OSR buffer is
 295   //
 296   // locals[nlocals-1..0]
 297   // monitors[0..number_of_locks]
 298   //
 299   // locals is a direct copy of the interpreter frame, so in the osr buffer
 300   // the first slot in the local array is the last local from the interpreter
 301   // and the last slot is local[0] (the receiver) from the interpreter
 302   //
 303   // Similarly with locks: the first lock slot in the osr buffer is the nth lock
 304   // from the interpreter frame, and the nth lock slot in the osr buffer is the
 305   // 0th lock in the interpreter frame (the method lock, if a synchronized method)
 306 
 307   // Initialize monitors in the compiled activation.
 308   //   rcx: pointer to osr buffer
 309   //
 310   // All other registers are dead at this point and the locals will be
 311   // copied into place by code emitted in the IR.
 312 
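
The reversal described above is a plain index flip; a sketch with a
hypothetical helper (not a HotSpot function):

    // interpreter local i lives in osr_buffer slot (nlocals - 1 - i);
    // locks map the same way, with number_of_locks in place of nlocals
    static int osr_slot_for_local(int i, int nlocals) {
      return nlocals - 1 - i;
    }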


 469     __ bind(*stub->continuation());
 470   }
 471 
 472   if (compilation()->env()->dtrace_method_probes()) {
 473 #ifdef _LP64
 474     __ mov(rdi, r15_thread);
 475     __ mov_metadata(rsi, method()->constant_encoding());
 476 #else
 477     __ get_thread(rax);
 478     __ movptr(Address(rsp, 0), rax);
 479     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 480 #endif
 481     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 482   }
 483 
 484   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 485     __ mov(rax, rbx);  // Restore the exception
 486   }
 487 
 488   // remove the activation and dispatch to the unwind handler
 489   __ remove_frame(initial_frame_size_in_bytes());
 490   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 491 
 492   // Emit the slow path assembly
 493   if (stub != NULL) {
 494     stub->emit_code(this);
 495   }
 496 
 497   return offset;
 498 }
 499 
 500 
 501 int LIR_Assembler::emit_deopt_handler() {
 502   // if the last instruction is a call (typically to do a throw which
 503   // is coming at the end after block reordering) the return address
 504   // must still point into the code area in order to avoid assertion
 505   // failures when searching for the corresponding bci => add a nop
 506   // (was bug 5/14/1999 - gri)
 507   __ nop();
 508 
 509   // generate code for exception handler


 515   }
 516 
 517   int offset = code_offset();
 518   InternalAddress here(__ pc());
 519 
 520   __ pushptr(here.addr());
 521   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 522   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 523   __ end_a_stub();
 524 
 525   return offset;
 526 }
 527 
 528 
 529 void LIR_Assembler::return_op(LIR_Opr result) {
 530   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 531   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 532     assert(result->fpu() == 0, "result must already be on TOS");
 533   }
 534 
 535   // Pop the stack before the safepoint code
 536   __ remove_frame(initial_frame_size_in_bytes());
 537 
 538   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 539     __ reserved_stack_check();
 540   }
 541 
 542   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 543 
 544   // Note: we do not need to round double result; float result has the right precision
 545   // the poll sets the condition code, but no data registers
 546 
 547   if (SafepointMechanism::uses_thread_local_poll()) {
 548 #ifdef _LP64
 549     const Register poll_addr = rscratch1;
 550     __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
 551 #else
 552     const Register poll_addr = rbx;
 553     assert(FrameMap::is_caller_save_register(poll_addr), "will overwrite");
 554     __ get_thread(poll_addr);
 555     __ movptr(poll_addr, Address(poll_addr, Thread::polling_page_offset()));
 556 #endif
 557     __ relocate(relocInfo::poll_return_type);
 558     __ testl(rax, Address(poll_addr, 0));
 559   } else {
 560     AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
 561 
 562     if (Assembler::is_polling_page_far()) {
 563       __ lea(rscratch1, polling_page);
 564       __ relocate(relocInfo::poll_return_type);
 565       __ testl(rax, Address(rscratch1, 0));
 566     } else {
 567       __ testl(rax, polling_page);
 568     }
 569   }
 570   __ ret(0);
 571 }
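
Both poll variants above read a word from a polling page; to request a
safepoint the VM arms that page so the read faults and a signal handler takes
over. A minimal user-space sketch of the mechanism, assuming POSIX mmap,
mprotect and SIGSEGV (illustrative only, not HotSpot's actual handler, and
glossing over async-signal-safety):

    #include <sys/mman.h>
    #include <csignal>

    static void* poll_page;

    static void on_fault(int) {
      // HotSpot would run its safepoint protocol here; this sketch just
      // disarms the page so the faulting read can be retried.
      mprotect(poll_page, 4096, PROT_READ);
    }

    int main() {
      poll_page = mmap(nullptr, 4096, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      std::signal(SIGSEGV, on_fault);
      mprotect(poll_page, 4096, PROT_NONE);            // arm the page
      volatile int x = *static_cast<int*>(poll_page);  // the testl analogue
      (void)x;
      return 0;
    }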
 572 
 573 




 574 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 575   guarantee(info != NULL, "Shouldn't be NULL");
 576   int offset = __ offset();
 577   if (SafepointMechanism::uses_thread_local_poll()) {
 578 #ifdef _LP64
 579     const Register poll_addr = rscratch1;
 580     __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
 581 #else
 582     assert(tmp->is_cpu_register(), "needed");
 583     const Register poll_addr = tmp->as_register();
 584     __ get_thread(poll_addr);
 585     __ movptr(poll_addr, Address(poll_addr, in_bytes(Thread::polling_page_offset())));
 586 #endif
 587     add_debug_info_for_branch(info);
 588     __ relocate(relocInfo::poll_type);
 589     address pre_pc = __ pc();
 590     __ testl(rax, Address(poll_addr, 0));
 591     address post_pc = __ pc();
 592     guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 593   } else {


 628       break;
 629     }
 630 
 631     case T_ADDRESS: {
 632       assert(patch_code == lir_patch_none, "no patching handled here");
 633       __ movptr(dest->as_register(), c->as_jint());
 634       break;
 635     }
 636 
 637     case T_LONG: {
 638       assert(patch_code == lir_patch_none, "no patching handled here");
 639 #ifdef _LP64
 640       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 641 #else
 642       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 643       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 644 #endif // _LP64
 645       break;
 646     }
 647 

 648     case T_OBJECT: {
 649       if (patch_code != lir_patch_none) {
 650         jobject2reg_with_patching(dest->as_register(), info);
 651       } else {
 652         __ movoop(dest->as_register(), c->as_jobject());
 653       }
 654       break;
 655     }
 656 
 657     case T_METADATA: {
 658       if (patch_code != lir_patch_none) {
 659         klass2reg_with_patching(dest->as_register(), info);
 660       } else {
 661         __ mov_metadata(dest->as_register(), c->as_metadata());
 662       }
 663       break;
 664     }
 665 
 666     case T_FLOAT: {
 667       if (dest->is_single_xmm()) {


 710     default:
 711       ShouldNotReachHere();
 712   }
 713 }
 714 
 715 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 716   assert(src->is_constant(), "should not call otherwise");
 717   assert(dest->is_stack(), "should not call otherwise");
 718   LIR_Const* c = src->as_constant_ptr();
 719 
 720   switch (c->type()) {
 721     case T_INT:  // fall through
 722     case T_FLOAT:
 723       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 724       break;
 725 
 726     case T_ADDRESS:
 727       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 728       break;
 729 

 730     case T_OBJECT:
 731       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 732       break;
 733 
 734     case T_LONG:  // fall through
 735     case T_DOUBLE:
 736 #ifdef _LP64
 737       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 738                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 739 #else
 740       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 741                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 742       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 743                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 744 #endif // _LP64
 745       break;
 746 
 747     default:
 748       ShouldNotReachHere();
 749   }
 750 }
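
T_FLOAT shares the T_INT path above because only the raw 32-bit pattern
matters for a store; as_jint_bits() is a reinterpretation, not a numeric
conversion. The same idea in portable C++ (sketch):

    #include <cstdint>
    #include <cstring>
    int32_t float_bits(float f) {
      int32_t bits;
      std::memcpy(&bits, &f, sizeof bits);  // copy the bits, no conversion
      return bits;
    }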
 751 
 752 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 753   assert(src->is_constant(), "should not call otherwise");
 754   assert(dest->is_address(), "should not call otherwise");
 755   LIR_Const* c = src->as_constant_ptr();
 756   LIR_Address* addr = dest->as_address_ptr();
 757 
 758   int null_check_here = code_offset();
 759   switch (type) {
 760     case T_INT:    // fall through
 761     case T_FLOAT:
 762       __ movl(as_Address(addr), c->as_jint_bits());
 763       break;
 764 
 765     case T_ADDRESS:
 766       __ movptr(as_Address(addr), c->as_jint_bits());
 767       break;
 768 

 769     case T_OBJECT:  // fall through
 770     case T_ARRAY:
 771       if (c->as_jobject() == NULL) {
 772         if (UseCompressedOops && !wide) {
 773           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 774         } else {
 775 #ifdef _LP64
 776           __ xorptr(rscratch1, rscratch1);
 777           null_check_here = code_offset();
 778           __ movptr(as_Address(addr), rscratch1);
 779 #else
 780           __ movptr(as_Address(addr), NULL_WORD);
 781 #endif
 782         }
 783       } else {
 784         if (is_literal_address(addr)) {
 785           ShouldNotReachHere();
 786           __ movoop(as_Address(addr, noreg), c->as_jobject());
 787         } else {
 788 #ifdef _LP64


 837   if (info != NULL) {
 838     add_debug_info_for_null_check(null_check_here, info);
 839   }
 840 }
 841 
 842 
 843 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 844   assert(src->is_register(), "should not call otherwise");
 845   assert(dest->is_register(), "should not call otherwise");
 846 
 847   // move between cpu-registers
 848   if (dest->is_single_cpu()) {
 849 #ifdef _LP64
 850     if (src->type() == T_LONG) {
 851       // Can do LONG -> OBJECT
 852       move_regs(src->as_register_lo(), dest->as_register());
 853       return;
 854     }
 855 #endif
 856     assert(src->is_single_cpu(), "must match");
 857     if (src->type() == T_OBJECT) {
 858       __ verify_oop(src->as_register());
 859     }
 860     move_regs(src->as_register(), dest->as_register());
 861 
 862   } else if (dest->is_double_cpu()) {
 863 #ifdef _LP64
 864     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 865       // Surprising to me, but we can see a move of a long to T_OBJECT
 866       __ verify_oop(src->as_register());
 867       move_regs(src->as_register(), dest->as_register_lo());
 868       return;
 869     }
 870 #endif
 871     assert(src->is_double_cpu(), "must match");
 872     Register f_lo = src->as_register_lo();
 873     Register f_hi = src->as_register_hi();
 874     Register t_lo = dest->as_register_lo();
 875     Register t_hi = dest->as_register_hi();
 876 #ifdef _LP64
 877     assert(f_hi == f_lo, "must be same");
 878     assert(t_hi == t_lo, "must be same");
 879     move_regs(f_lo, t_lo);
 880 #else
 881     assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
 882 
 883 
 884     if (f_lo == t_hi && f_hi == t_lo) {


 915     __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
 916   } else if (dest->is_double_xmm()) {
 917     assert(src->is_double_xmm(), "must match");
 918     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 919 
 920     // move between fpu-registers (no instruction necessary because of fpu-stack)
 921   } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
 922     assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
 923     assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
 924   } else {
 925     ShouldNotReachHere();
 926   }
 927 }
 928 
 929 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 930   assert(src->is_register(), "should not call otherwise");
 931   assert(dest->is_stack(), "should not call otherwise");
 932 
 933   if (src->is_single_cpu()) {
 934     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
 935     if (type == T_OBJECT || type == T_ARRAY) {
 936       __ verify_oop(src->as_register());
 937       __ movptr (dst, src->as_register());
 938     } else if (type == T_METADATA) {
 939       __ movptr (dst, src->as_register());
 940     } else {
 941       __ movl (dst, src->as_register());
 942     }
 943 
 944   } else if (src->is_double_cpu()) {
 945     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 946     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
 947     __ movptr (dstLO, src->as_register_lo());
 948     NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
 949 
 950   } else if (src->is_single_xmm()) {
 951     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 952     __ movflt(dst_addr, src->as_xmm_float_reg());
 953 
 954   } else if (src->is_double_xmm()) {
 955     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());


 961     if (pop_fpu_stack)     __ fstp_s (dst_addr);
 962     else                   __ fst_s  (dst_addr);
 963 
 964   } else if (src->is_double_fpu()) {
 965     assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
 966     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 967     if (pop_fpu_stack)     __ fstp_d (dst_addr);
 968     else                   __ fst_d  (dst_addr);
 969 
 970   } else {
 971     ShouldNotReachHere();
 972   }
 973 }
 974 
 975 
 976 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 977   LIR_Address* to_addr = dest->as_address_ptr();
 978   PatchingStub* patch = NULL;
 979   Register compressed_src = rscratch1;
 980 
 981   if (type == T_ARRAY || type == T_OBJECT) {
 982     __ verify_oop(src->as_register());
 983 #ifdef _LP64
 984     if (UseCompressedOops && !wide) {
 985       __ movptr(compressed_src, src->as_register());
 986       __ encode_heap_oop(compressed_src);
 987       if (patch_code != lir_patch_none) {
 988         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
 989       }
 990     }
 991 #endif
 992   }
 993 
 994   if (patch_code != lir_patch_none) {
 995     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 996     Address toa = as_Address(to_addr);
 997     assert(toa.disp() != 0, "must have");
 998   }
 999 
1000   int null_check_here = code_offset();
1001   switch (type) {


1006         assert(src->is_single_fpu(), "must be");
1007         assert(src->fpu_regnr() == 0, "argument must be on TOS");
1008         if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
1009         else                    __ fst_s (as_Address(to_addr));
1010       }
1011       break;
1012     }
1013 
1014     case T_DOUBLE: {
1015       if (src->is_double_xmm()) {
1016         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1017       } else {
1018         assert(src->is_double_fpu(), "must be");
1019         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1020         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1021         else                    __ fst_d (as_Address(to_addr));
1022       }
1023       break;
1024     }
1025 

1026     case T_ARRAY:   // fall through
1027     case T_OBJECT:  // fall through
1028       if (UseCompressedOops && !wide) {
1029         __ movl(as_Address(to_addr), compressed_src);
1030       } else {
1031         __ movptr(as_Address(to_addr), src->as_register());
1032       }
1033       break;
1034     case T_METADATA:
1035       // We get here to store a method pointer to the stack to pass to
1036       // a dtrace runtime call. This can't work on 64 bit with
1037       // compressed klass ptrs: T_METADATA can be a compressed klass
1038       // ptr or a 64 bit method pointer.
1039       LP64_ONLY(ShouldNotReachHere());
1040       __ movptr(as_Address(to_addr), src->as_register());
1041       break;
1042     case T_ADDRESS:
1043       __ movptr(as_Address(to_addr), src->as_register());
1044       break;
1045     case T_INT:


1096       break;
1097 
1098     default:
1099       ShouldNotReachHere();
1100   }
1101   if (info != NULL) {
1102     add_debug_info_for_null_check(null_check_here, info);
1103   }
1104 
1105   if (patch_code != lir_patch_none) {
1106     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1107   }
1108 }
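
The encode_heap_oop/movl pair above stores a 32-bit narrow oop. The
arithmetic behind compressed oops, with assumed base and shift values (the
VM picks both at startup):

    #include <cstdint>
    const uintptr_t heap_base = 0x800000000ull;  // assumption for the sketch
    const int       oop_shift = 3;               // 8-byte object alignment
    uint32_t encode(uintptr_t oop) {
      return (uint32_t)((oop - heap_base) >> oop_shift);
    }
    uintptr_t decode(uint32_t narrow) {
      return heap_base + ((uintptr_t)narrow << oop_shift);
    }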
1109 
1110 
1111 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1112   assert(src->is_stack(), "should not call otherwise");
1113   assert(dest->is_register(), "should not call otherwise");
1114 
1115   if (dest->is_single_cpu()) {
1116     if (type == T_ARRAY || type == T_OBJECT) {
1117       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1118       __ verify_oop(dest->as_register());
1119     } else if (type == T_METADATA) {
1120       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1121     } else {
1122       __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1123     }
1124 
1125   } else if (dest->is_double_cpu()) {
1126     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1127     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1128     __ movptr(dest->as_register_lo(), src_addr_LO);
1129     NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1130 
1131   } else if (dest->is_single_xmm()) {
1132     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1133     __ movflt(dest->as_xmm_float_reg(), src_addr);
1134 
1135   } else if (dest->is_double_xmm()) {
1136     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1137     __ movdbl(dest->as_xmm_double_reg(), src_addr);
1138 
1139   } else if (dest->is_single_fpu()) {
1140     assert(dest->fpu_regnr() == 0, "dest must be TOS");
1141     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1142     __ fld_s(src_addr);
1143 
1144   } else if (dest->is_double_fpu()) {
1145     assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1146     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1147     __ fld_d(src_addr);
1148 
1149   } else {
1150     ShouldNotReachHere();
1151   }
1152 }
1153 
1154 
1155 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1156   if (src->is_single_stack()) {
1157     if (type == T_OBJECT || type == T_ARRAY) {
1158       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1159       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1160     } else {
1161 #ifndef _LP64
1162       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1163       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1164 #else
1165       // there is no pushl in 64-bit mode
1166       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1167       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1168 #endif
1169     }
1170 
1171   } else if (src->is_double_stack()) {
1172 #ifdef _LP64
1173     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1174     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1175 #else
1176     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1177     // push and pop the part at src + wordSize, adding wordSize for the previous push
1178     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1179     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1180     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1181 #endif // _LP64
1182 
1183   } else {
1184     ShouldNotReachHere();
1185   }
1186 }
1187 
1188 
1189 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1190   assert(src->is_address(), "should not call otherwise");
1191   assert(dest->is_register(), "should not call otherwise");
1192 
1193   LIR_Address* addr = src->as_address_ptr();
1194   Address from_addr = as_Address(addr);
1195 
1196   if (addr->base()->type() == T_OBJECT) {
1197     __ verify_oop(addr->base()->as_pointer_register());
1198   }
1199 
1200   switch (type) {
1201     case T_BOOLEAN: // fall through
1202     case T_BYTE:    // fall through
1203     case T_CHAR:    // fall through
1204     case T_SHORT:
1205       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1206         // on pre-P6 processors we may get partial register stalls
1207         // so blow away the value of to_rinfo before loading a
1208         // partial word into it.  Do it here so that it precedes
1209         // the potential patch point below.
1210         __ xorptr(dest->as_register(), dest->as_register());
1211       }
1212       break;
1213    default:
1214      break;
1215   }
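
The early clear matters because a narrow load into a register whose upper
bits are still live forces older pipelines to merge the old and new halves
(the partial register stall). Clearing first breaks that dependency; in
instruction terms (sketch):

    // xor  eax, eax    ; upper bits now known-zero, no stale state
    // mov  ax, [mem]   ; narrow load, nothing to merge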
1216 


1229         __ movflt(dest->as_xmm_float_reg(), from_addr);
1230       } else {
1231         assert(dest->is_single_fpu(), "must be");
1232         assert(dest->fpu_regnr() == 0, "dest must be TOS");
1233         __ fld_s(from_addr);
1234       }
1235       break;
1236     }
1237 
1238     case T_DOUBLE: {
1239       if (dest->is_double_xmm()) {
1240         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1241       } else {
1242         assert(dest->is_double_fpu(), "must be");
1243         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1244         __ fld_d(from_addr);
1245       }
1246       break;
1247     }
1248 

1249     case T_OBJECT:  // fall through
1250     case T_ARRAY:   // fall through
1251       if (UseCompressedOops && !wide) {
1252         __ movl(dest->as_register(), from_addr);
1253       } else {
1254         __ movptr(dest->as_register(), from_addr);
1255       }
1256       break;
1257 
1258     case T_ADDRESS:
1259       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1260         __ movl(dest->as_register(), from_addr);
1261       } else {
1262         __ movptr(dest->as_register(), from_addr);
1263       }
1264       break;
1265     case T_INT:
1266       __ movl(dest->as_register(), from_addr);
1267       break;
1268 


1338     case T_SHORT: {
1339       Register dest_reg = dest->as_register();
1340       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1341         __ movswl(dest_reg, from_addr);
1342       } else {
1343         __ movw(dest_reg, from_addr);
1344         __ shll(dest_reg, 16);
1345         __ sarl(dest_reg, 16);
1346       }
1347       break;
1348     }
1349 
1350     default:
1351       ShouldNotReachHere();
1352   }
1353 
1354   if (patch != NULL) {
1355     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1356   }
1357 
1358   if (type == T_ARRAY || type == T_OBJECT) {
1359 #ifdef _LP64
1360     if (UseCompressedOops && !wide) {
1361       __ decode_heap_oop(dest->as_register());
1362     }
1363 #endif
1364 
1365     // Load barrier has not yet been applied, so ZGC can't verify the oop here
1366     if (!UseZGC) {
1367       __ verify_oop(dest->as_register());
1368     }
1369   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1370 #ifdef _LP64
1371     if (UseCompressedClassPointers) {

1372       __ decode_klass_not_null(dest->as_register());



1373     }


1374 #endif
1375   }
1376 }
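
The movw/shll/sarl fallback in the T_SHORT case above is plain
two's-complement sign extension: shift the halfword to the top of the
register, then shift back arithmetically so bit 15 fills the upper half.
The same computation in C++ (sketch; relies on arithmetic right shift, as
the generated code does):

    #include <cstdint>
    int32_t sign_extend_16(uint16_t v) {
      return (int32_t)((uint32_t)v << 16) >> 16;  // the shll/sarl pair
    }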
1377 
1378 
1379 NEEDS_CLEANUP; // This could be static?
1380 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1381   int elem_size = type2aelembytes(type);
1382   switch (elem_size) {
1383     case 1: return Address::times_1;
1384     case 2: return Address::times_2;
1385     case 4: return Address::times_4;
1386     case 8: return Address::times_8;
1387   }
1388   ShouldNotReachHere();
1389   return Address::no_scale;
1390 }
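
Callers combine the returned scale with the array base offset to form
element addresses, in the pattern used elsewhere in this file (sketch):

    // Address elem(array_reg, index_reg, array_element_size(T_INT),
    //              arrayOopDesc::base_offset_in_bytes(T_INT));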
1391 
1392 
1393 void LIR_Assembler::emit_op3(LIR_Op3* op) {


1575     add_debug_info_for_null_check_here(op->stub()->info());
1576     __ cmpb(Address(op->klass()->as_register(),
1577                     InstanceKlass::init_state_offset()),
1578                     InstanceKlass::fully_initialized);
1579     __ jcc(Assembler::notEqual, *op->stub()->entry());
1580   }
1581   __ allocate_object(op->obj()->as_register(),
1582                      op->tmp1()->as_register(),
1583                      op->tmp2()->as_register(),
1584                      op->header_size(),
1585                      op->object_size(),
1586                      op->klass()->as_register(),
1587                      *op->stub()->entry());
1588   __ bind(*op->stub()->continuation());
1589 }
1590 
1591 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1592   Register len =  op->len()->as_register();
1593   LP64_ONLY( __ movslq(len, len); )
1594 
1595   if (UseSlowPath ||
1596       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1597       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1598     __ jmp(*op->stub()->entry());
1599   } else {
1600     Register tmp1 = op->tmp1()->as_register();
1601     Register tmp2 = op->tmp2()->as_register();
1602     Register tmp3 = op->tmp3()->as_register();
1603     if (len == tmp1) {
1604       tmp1 = tmp3;
1605     } else if (len == tmp2) {
1606       tmp2 = tmp3;
1607     } else if (len == tmp3) {
1608       // everything is ok
1609     } else {
1610       __ mov(tmp3, len);
1611     }
1612     __ allocate_array(op->obj()->as_register(),
1613                       len,
1614                       tmp1,
1615                       tmp2,


1673     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1674   }
1675   Label profile_cast_success, profile_cast_failure;
1676   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1677   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1678 
1679   if (obj == k_RInfo) {
1680     k_RInfo = dst;
1681   } else if (obj == klass_RInfo) {
1682     klass_RInfo = dst;
1683   }
1684   if (k->is_loaded() && !UseCompressedClassPointers) {
1685     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1686   } else {
1687     Rtmp1 = op->tmp3()->as_register();
1688     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1689   }
1690 
1691   assert_different_registers(obj, k_RInfo, klass_RInfo);
1692 
1693   __ cmpptr(obj, (int32_t)NULL_WORD);
1694   if (op->should_profile()) {
1695     Label not_null;
1696     __ jccb(Assembler::notEqual, not_null);
1697     // Object is null; update MDO and exit
1698     Register mdo  = klass_RInfo;
1699     __ mov_metadata(mdo, md->constant_encoding());
1700     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1701     int header_bits = BitData::null_seen_byte_constant();
1702     __ orb(data_addr, header_bits);
1703     __ jmp(*obj_is_null);
1704     __ bind(not_null);
1705   } else {
1706     __ jcc(Assembler::equal, *obj_is_null);


1707   }
1708 
1709   if (!k->is_loaded()) {
1710     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1711   } else {
1712 #ifdef _LP64
1713     __ mov_metadata(k_RInfo, k->constant_encoding());
1714 #endif // _LP64
1715   }
1716   __ verify_oop(obj);
1717 
1718   if (op->fast_check()) {
1719     // get object class
1720     // not a safepoint as obj null check happens earlier
1721 #ifdef _LP64
1722     if (UseCompressedClassPointers) {
1723       __ load_klass(Rtmp1, obj);
1724       __ cmpptr(k_RInfo, Rtmp1);
1725     } else {
1726       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));


1897         __ mov(dst, obj);
1898       }
1899     } else
1900       if (code == lir_instanceof) {
1901         Register obj = op->object()->as_register();
1902         Register dst = op->result_opr()->as_register();
1903         Label success, failure, done;
1904         emit_typecheck_helper(op, &success, &failure, &failure);
1905         __ bind(failure);
1906         __ xorptr(dst, dst);
1907         __ jmpb(done);
1908         __ bind(success);
1909         __ movptr(dst, 1);
1910         __ bind(done);
1911       } else {
1912         ShouldNotReachHere();
1913       }
1914 
1915 }
1916 
1917 
1918 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1919   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1920     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1921     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1922     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1923     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1924     Register addr = op->addr()->as_register();
1925     __ lock();
1926     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1927 
1928   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1929     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1930     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1931     Register newval = op->new_value()->as_register();
1932     Register cmpval = op->cmp_value()->as_register();
1933     assert(cmpval == rax, "wrong register");
1934     assert(newval != NULL, "new val must be register");
1935     assert(cmpval != newval, "cmp and new values must be in different registers");
1936     assert(cmpval != addr, "cmp and addr must be in different registers");


1957       __ cmpxchgl(newval, Address(addr, 0));
1958     }
1959 #ifdef _LP64
1960   } else if (op->code() == lir_cas_long) {
1961     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1962     Register newval = op->new_value()->as_register_lo();
1963     Register cmpval = op->cmp_value()->as_register_lo();
1964     assert(cmpval == rax, "wrong register");
1965     assert(newval != NULL, "new val must be register");
1966     assert(cmpval != newval, "cmp and new values must be in different registers");
1967     assert(cmpval != addr, "cmp and addr must be in different registers");
1968     assert(newval != addr, "new value and addr must be in different registers");
1969     __ lock();
1970     __ cmpxchgq(newval, Address(addr, 0));
1971 #endif // _LP64
1972   } else {
1973     Unimplemented();
1974   }
1975 }
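
All three paths rely on the x86 cmpxchg contract: the expected value lives
in rax, ZF reports success, and on failure rax receives the value actually
found in memory. The portable equivalent of lock cmpxchg (sketch):

    #include <atomic>
    #include <cstdint>
    bool cas(std::atomic<intptr_t>& word, intptr_t expected, intptr_t new_value) {
      // compare_exchange_strong updates 'expected' with the observed value
      // on failure, mirroring what cmpxchg leaves in rax.
      return word.compare_exchange_strong(expected, new_value);
    }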
1976 
1977 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1978   Assembler::Condition acond, ncond;
1979   switch (condition) {
1980     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
1981     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
1982     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
1983     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
1984     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
1985     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
1986     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
1987     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
1988     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
1989                                 ShouldNotReachHere();
1990   }
1991 
1992   if (opr1->is_cpu_register()) {
1993     reg2reg(opr1, result);
1994   } else if (opr1->is_stack()) {
1995     stack2reg(opr1, result, result->type());
1996   } else if (opr1->is_constant()) {


2493       int r_hi = right->as_constant_ptr()->as_jint_hi();
2494       switch (code) {
2495         case lir_logic_and:
2496           __ andl(l_lo, r_lo);
2497           __ andl(l_hi, r_hi);
2498           break;
2499         case lir_logic_or:
2500           __ orl(l_lo, r_lo);
2501           __ orl(l_hi, r_hi);
2502           break;
2503         case lir_logic_xor:
2504           __ xorl(l_lo, r_lo);
2505           __ xorl(l_hi, r_hi);
2506           break;
2507         default: ShouldNotReachHere();
2508       }
2509 #endif // _LP64
2510     } else {
2511 #ifdef _LP64
2512       Register r_lo;
2513       if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2514         r_lo = right->as_register();
2515       } else {
2516         r_lo = right->as_register_lo();
2517       }
2518 #else
2519       Register r_lo = right->as_register_lo();
2520       Register r_hi = right->as_register_hi();
2521       assert(l_lo != r_hi, "overwriting registers");
2522 #endif
2523       switch (code) {
2524         case lir_logic_and:
2525           __ andptr(l_lo, r_lo);
2526           NOT_LP64(__ andptr(l_hi, r_hi);)
2527           break;
2528         case lir_logic_or:
2529           __ orptr(l_lo, r_lo);
2530           NOT_LP64(__ orptr(l_hi, r_hi);)
2531           break;
2532         case lir_logic_xor:
2533           __ xorptr(l_lo, r_lo);


2606     move_regs(lreg, rax);
2607 
2608     int idivl_offset = __ corrected_idivl(rreg);
2609     if (ImplicitDiv0Checks) {
2610       add_debug_info_for_div0(idivl_offset, info);
2611     }
2612     if (code == lir_irem) {
2613       move_regs(rdx, dreg); // result is in rdx
2614     } else {
2615       move_regs(rax, dreg);
2616     }
2617   }
2618 }
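
The register shuffling above follows the x86 idiv contract: the dividend is
taken from edx:eax, the quotient comes back in rax, and the remainder in rdx.
corrected_idivl also guards the one input raw idiv traps on, min_int / -1.
A C++ sketch of that correction:

    #include <climits>
    void corrected_div(int dividend, int divisor, int& q, int& r) {
      if (dividend == INT_MIN && divisor == -1) {
        q = INT_MIN;                  // the overflow case idiv would #DE on
        r = 0;
      } else {
        q = dividend / divisor;
        r = dividend % divisor;
      }
    }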
2619 
2620 
2621 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2622   if (opr1->is_single_cpu()) {
2623     Register reg1 = opr1->as_register();
2624     if (opr2->is_single_cpu()) {
2625       // cpu register - cpu register
2626       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2627         __ cmpoop(reg1, opr2->as_register());
2628       } else {
2629         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2630         __ cmpl(reg1, opr2->as_register());
2631       }
2632     } else if (opr2->is_stack()) {
2633       // cpu register - stack
2634       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2635         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2636       } else {
2637         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2638       }
2639     } else if (opr2->is_constant()) {
2640       // cpu register - constant
2641       LIR_Const* c = opr2->as_constant_ptr();
2642       if (c->type() == T_INT) {
2643         __ cmpl(reg1, c->as_jint());
2644       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2645         // In 64-bit mode an oop fits in a single register
2646         jobject o = c->as_jobject();
2647         if (o == NULL) {
2648           __ cmpptr(reg1, (int32_t)NULL_WORD);
2649         } else {
2650           __ cmpoop(reg1, o);
2651         }
2652       } else {
2653         fatal("unexpected type: %s", basictype_to_str(c->type()));
2654       }
2655       // cpu register - address
2656     } else if (opr2->is_address()) {
2657       if (op->info() != NULL) {
2658         add_debug_info_for_null_check_here(op->info());
2659       }
2660       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2661     } else {
2662       ShouldNotReachHere();
2663     }
2664 


2724       // xmm register - constant
2725       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2726     } else if (opr2->is_address()) {
2727       // xmm register - address
2728       if (op->info() != NULL) {
2729         add_debug_info_for_null_check_here(op->info());
2730       }
2731       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2732     } else {
2733       ShouldNotReachHere();
2734     }
2735 
2736   } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2737     assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2738     assert(opr2->is_fpu_register(), "both must be registers");
2739     __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2740 
2741   } else if (opr1->is_address() && opr2->is_constant()) {
2742     LIR_Const* c = opr2->as_constant_ptr();
2743 #ifdef _LP64
2744     if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2745       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2746       __ movoop(rscratch1, c->as_jobject());
2747     }
2748 #endif // LP64
2749     if (op->info() != NULL) {
2750       add_debug_info_for_null_check_here(op->info());
2751     }
2752     // special case: address - constant
2753     LIR_Address* addr = opr1->as_address_ptr();
2754     if (c->type() == T_INT) {
2755       __ cmpl(as_Address(addr), c->as_jint());
2756     } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2757 #ifdef _LP64
2758       // %%% Make this explode if addr isn't reachable until we figure out a
2759       // better strategy by giving noreg as the temp for as_Address
2760       __ cmpoop(rscratch1, as_Address(addr, noreg));
2761 #else
2762       __ cmpoop(as_Address(addr), c->as_jobject());
2763 #endif // _LP64
2764     } else {
2765       ShouldNotReachHere();
2766     }
2767 
2768   } else {
2769     ShouldNotReachHere();
2770   }
2771 }
2772 
2773 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2774   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2775     if (left->is_single_xmm()) {
2776       assert(right->is_single_xmm(), "must match");


2815   switch (code) {
2816   case lir_static_call:
2817   case lir_optvirtual_call:
2818   case lir_dynamic_call:
2819     offset += NativeCall::displacement_offset;
2820     break;
2821   case lir_icvirtual_call:
2822     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2823     break;
2824   case lir_virtual_call:  // currently, sparc-specific for niagara
2825   default: ShouldNotReachHere();
2826   }
2827   __ align(BytesPerWord, offset);
2828 }
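
The padding computed above establishes the invariant asserted in call() and
ic_call() below: the 4-byte call displacement must not straddle a machine
word, so a later change of the call target can be patched with one atomic
store. After align(BytesPerWord, offset) the invariant is (sketch):

    // (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0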
2829 
2830 
2831 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2832   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2833          "must be aligned");
2834   __ call(AddressLiteral(op->addr(), rtype));
2835   add_call_info(code_offset(), op->info());
2836 }
2837 
2838 
2839 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2840   __ ic_call(op->addr());
2841   add_call_info(code_offset(), op->info());
2842   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2843          "must be aligned");
2844 }
2845 
2846 
2847 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2848 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2849   ShouldNotReachHere();
2850 }
2851 
2852 
2853 void LIR_Assembler::emit_static_call_stub() {
2854   address call_pc = __ pc();
2855   address stub = __ start_a_stub(call_stub_size());
2856   if (stub == NULL) {
2857     bailout("static call stub overflow");
2858     return;
2859   }
2860 
2861   int start = __ offset();


3017   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3018 }
3019 
3020 
3021 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3022   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3023   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3024   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3025   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3026 }
3027 
3028 
3029 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3030   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3031   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3032   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3033   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3034 }
3035 
3036 
3037 // This code replaces a call to arraycopy; no exception may
3038 // be thrown in this code, they must be thrown in the System.arraycopy
3039 // activation frame; we could save some checks if this would not be the case
3040 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3041   ciArrayKlass* default_type = op->expected_type();
3042   Register src = op->src()->as_register();
3043   Register dst = op->dst()->as_register();
3044   Register src_pos = op->src_pos()->as_register();
3045   Register dst_pos = op->dst_pos()->as_register();
3046   Register length  = op->length()->as_register();
3047   Register tmp = op->tmp()->as_register();
3048 
3049   __ resolve(ACCESS_READ, src);
3050   __ resolve(ACCESS_WRITE, dst);
3051 
3052   CodeStub* stub = op->stub();
3053   int flags = op->flags();
3054   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3055   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
3056 
3057   // if we don't know anything, just go through the generic arraycopy
3058   if (default_type == NULL) {
3059     // save outgoing arguments on stack in case call to System.arraycopy is needed
3060     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3061     // for interpreter calling conventions. Now we have to do it in new style conventions.
3062     // For the moment until C1 gets the new register allocator I just force all the
3063     // args to the right place (except the register args) and then on the back side
3064     // reload the register args properly if we go slow path. Yuck
3065 
3066     // These are proper for the calling convention
3067     store_parameter(length, 2);
3068     store_parameter(dst_pos, 1);
3069     store_parameter(dst, 0);
3070 
3071     // these are just temporary placements until we need to reload
3072     store_parameter(src_pos, 3);
3073     store_parameter(src, 4);
3074     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3075 


   1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciValueKlass.hpp"
  36 #include "gc/shared/barrierSet.hpp"
  37 #include "gc/shared/cardTableBarrierSet.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "nativeInst_x86.hpp"
  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "vmreg_x86.inline.hpp"
  45 
  46 
  47 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  48 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  49 // fast versions of NegF/NegD and AbsF/AbsD.
  50 
  51 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  52 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  53   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  54   // for the 128-bit operands of SSE instructions.
  55   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));


 183 }
 184 
 185 void LIR_Assembler::ffree(int i) {
 186   __ ffree(i);
 187 }
 188 
 189 void LIR_Assembler::breakpoint() {
 190   __ int3();
 191 }
 192 
 193 void LIR_Assembler::push(LIR_Opr opr) {
 194   if (opr->is_single_cpu()) {
 195     __ push_reg(opr->as_register());
 196   } else if (opr->is_double_cpu()) {
 197     NOT_LP64(__ push_reg(opr->as_register_hi()));
 198     __ push_reg(opr->as_register_lo());
 199   } else if (opr->is_stack()) {
 200     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 201   } else if (opr->is_constant()) {
 202     LIR_Const* const_opr = opr->as_constant_ptr();
 203     if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) {
 204       __ push_oop(const_opr->as_jobject());
 205     } else if (const_opr->type() == T_INT) {
 206       __ push_jint(const_opr->as_jint());
 207     } else {
 208       ShouldNotReachHere();
 209     }
 210 
 211   } else {
 212     ShouldNotReachHere();
 213   }
 214 }
 215 
 216 void LIR_Assembler::pop(LIR_Opr opr) {
 217   if (opr->is_single_cpu()) {
 218     __ pop_reg(opr->as_register());
 219   } else {
 220     ShouldNotReachHere();
 221   }
 222 }
 223 


 273   return as_Address(addr);
 274 }
 275 
 276 
 277 void LIR_Assembler::osr_entry() {
 278   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 279   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 280   ValueStack* entry_state = osr_entry->state();
 281   int number_of_locks = entry_state->locks_size();
 282 
 283   // we jump here if osr happens with the interpreter
 284   // state set up to continue at the beginning of the
 285   // loop that triggered osr - in particular, we have
 286   // the following registers setup:
 287   //
 288   // rcx: osr buffer
 289   //
 290 
 291   // build frame
 292   ciMethod* m = compilation()->method();
 293   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
 294                  needs_stack_repair(), NULL);
 295 
 296   // OSR buffer is
 297   //
 298   // locals[nlocals-1..0]
 299   // monitors[0..number_of_locks]
 300   //
 301   // locals is a direct copy of the interpreter frame, so in the osr buffer
 302   // the first slot in the local array is the last local from the interpreter
 303   // and the last slot is local[0] (the receiver) from the interpreter
 304   //
 305   // Similarly with locks: the first lock slot in the osr buffer is the nth lock
 306   // from the interpreter frame, and the nth lock slot in the osr buffer is the
 307   // 0th lock in the interpreter frame (the method lock, if a synchronized method)
 308 
 309   // Initialize monitors in the compiled activation.
 310   //   rcx: pointer to osr buffer
 311   //
 312   // All other registers are dead at this point and the locals will be
 313   // copied into place by code emitted in the IR.
 314 


 471     __ bind(*stub->continuation());
 472   }
 473 
 474   if (compilation()->env()->dtrace_method_probes()) {
 475 #ifdef _LP64
 476     __ mov(rdi, r15_thread);
 477     __ mov_metadata(rsi, method()->constant_encoding());
 478 #else
 479     __ get_thread(rax);
 480     __ movptr(Address(rsp, 0), rax);
 481     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 482 #endif
 483     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 484   }
 485 
 486   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 487     __ mov(rax, rbx);  // Restore the exception
 488   }
 489 
 490   // remove the activation and dispatch to the unwind handler
 491   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 492   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 493 
 494   // Emit the slow path assembly
 495   if (stub != NULL) {
 496     stub->emit_code(this);
 497   }
 498 
 499   return offset;
 500 }
 501 
 502 
 503 int LIR_Assembler::emit_deopt_handler() {
 504   // if the last instruction is a call (typically to do a throw which
 505   // is coming at the end after block reordering) the return address
 506   // must still point into the code area in order to avoid assertion
 507   // failures when searching for the corresponding bci => add a nop
 508   // (was bug 5/14/1999 - gri)
 509   __ nop();
 510 
 511   // generate code for exception handler


 517   }
 518 
 519   int offset = code_offset();
 520   InternalAddress here(__ pc());
 521 
 522   __ pushptr(here.addr());
 523   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 524   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 525   __ end_a_stub();
 526 
 527   return offset;
 528 }
 529 
 530 
 531 void LIR_Assembler::return_op(LIR_Opr result) {
 532   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 533   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 534     assert(result->fpu() == 0, "result must already be on TOS");
 535   }
 536 
 537   ciMethod* method = compilation()->method();
 538   if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
 539     ciType* return_type = method->return_type();
 540     if (return_type->is_valuetype()) {
 541       ciValueKlass* vk = return_type->as_value_klass();
 542       if (vk->can_be_returned_as_fields()) {
 543 #ifndef _LP64
 544         Unimplemented();
 545 #else
 546         address unpack_handler = vk->unpack_handler();
 547         assert(unpack_handler != NULL, "must be");
 548         __ call(RuntimeAddress(unpack_handler));
 549         // At this point, rax points to the value object (for interpreter or C1 caller).
 550         // The fields of the object are copied into registers (for C2 caller).
 551 #endif
 552       }
 553     }
 554   }
 555 
 556   // Pop the stack before the safepoint code
 557   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 558 
 559   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 560     __ reserved_stack_check();
 561   }
 562 
 563   bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 564 
 565   // Note: we do not need to round double result; float result has the right precision
 566   // the poll sets the condition code, but no data registers
 567 
 568   if (SafepointMechanism::uses_thread_local_poll()) {
 569 #ifdef _LP64
 570     const Register poll_addr = rscratch1;
 571     __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
 572 #else
 573     const Register poll_addr = rbx;
 574     assert(FrameMap::is_caller_save_register(poll_addr), "will overwrite");
 575     __ get_thread(poll_addr);
 576     __ movptr(poll_addr, Address(poll_addr, Thread::polling_page_offset()));
 577 #endif
 578     __ relocate(relocInfo::poll_return_type);
 579     __ testl(rax, Address(poll_addr, 0));
 580   } else {
 581     AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
 582 
 583     if (Assembler::is_polling_page_far()) {
 584       __ lea(rscratch1, polling_page);
 585       __ relocate(relocInfo::poll_return_type);
 586       __ testl(rax, Address(rscratch1, 0));
 587     } else {
 588       __ testl(rax, polling_page);
 589     }
 590   }
 591   __ ret(0);
 592 }
 593 
 594 
 595 int LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
 596   return (__ store_value_type_fields_to_buf(vk, false));
 597 }
 598 
 599 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 600   guarantee(info != NULL, "Shouldn't be NULL");
 601   int offset = __ offset();
 602   if (SafepointMechanism::uses_thread_local_poll()) {
 603 #ifdef _LP64
 604     const Register poll_addr = rscratch1;
 605     __ movptr(poll_addr, Address(r15_thread, Thread::polling_page_offset()));
 606 #else
 607     assert(tmp->is_cpu_register(), "needed");
 608     const Register poll_addr = tmp->as_register();
 609     __ get_thread(poll_addr);
 610     __ movptr(poll_addr, Address(poll_addr, in_bytes(Thread::polling_page_offset())));
 611 #endif
 612     add_debug_info_for_branch(info);
 613     __ relocate(relocInfo::poll_type);
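         // The test must have a fixed length (2 bytes, plus 1 for the REX
         // prefix on 64 bit, where poll_addr is r10) so that the poll pc
         // recorded in the debug info is exact; the guarantee below checks it.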
 614     address pre_pc = __ pc();
 615     __ testl(rax, Address(poll_addr, 0));
 616     address post_pc = __ pc();
 617     guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 618   } else {


 653       break;
 654     }
 655 
 656     case T_ADDRESS: {
 657       assert(patch_code == lir_patch_none, "no patching handled here");
 658       __ movptr(dest->as_register(), c->as_jint());
 659       break;
 660     }
 661 
 662     case T_LONG: {
 663       assert(patch_code == lir_patch_none, "no patching handled here");
 664 #ifdef _LP64
 665       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 666 #else
 667       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 668       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 669 #endif // _LP64
 670       break;
 671     }
 672 
 673     case T_VALUETYPE: // Fall through
 674     case T_OBJECT: {
 675       if (patch_code != lir_patch_none) {
 676         jobject2reg_with_patching(dest->as_register(), info);
 677       } else {
 678         __ movoop(dest->as_register(), c->as_jobject());
 679       }
 680       break;
 681     }
 682 
 683     case T_METADATA: {
 684       if (patch_code != lir_patch_none) {
 685         klass2reg_with_patching(dest->as_register(), info);
 686       } else {
 687         __ mov_metadata(dest->as_register(), c->as_metadata());
 688       }
 689       break;
 690     }
 691 
 692     case T_FLOAT: {
 693       if (dest->is_single_xmm()) {


 736     default:
 737       ShouldNotReachHere();
 738   }
 739 }
 740 
 741 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 742   assert(src->is_constant(), "should not call otherwise");
 743   assert(dest->is_stack(), "should not call otherwise");
 744   LIR_Const* c = src->as_constant_ptr();
 745 
 746   switch (c->type()) {
 747     case T_INT:  // fall through
 748     case T_FLOAT:
 749       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 750       break;
 751 
 752     case T_ADDRESS:
 753       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 754       break;
 755 
 756     case T_VALUETYPE: // Fall through
 757     case T_OBJECT:
 758       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 759       break;
 760 
 761     case T_LONG:  // fall through
 762     case T_DOUBLE:
 763 #ifdef _LP64
 764       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 765                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 766 #else
 767       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 768                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 769       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 770                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 771 #endif // _LP64
 772       break;
 773 
 774     default:
 775       ShouldNotReachHere();
 776   }
 777 }
 778 
 779 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 780   assert(src->is_constant(), "should not call otherwise");
 781   assert(dest->is_address(), "should not call otherwise");
 782   LIR_Const* c = src->as_constant_ptr();
 783   LIR_Address* addr = dest->as_address_ptr();
 784 
 785   int null_check_here = code_offset();
 786   switch (type) {
 787     case T_INT:    // fall through
 788     case T_FLOAT:
 789       __ movl(as_Address(addr), c->as_jint_bits());
 790       break;
 791 
 792     case T_ADDRESS:
 793       __ movptr(as_Address(addr), c->as_jint_bits());
 794       break;
 795 
 796     case T_VALUETYPE: // fall through
 797     case T_OBJECT:  // fall through
 798     case T_ARRAY:
 799       if (c->as_jobject() == NULL) {
 800         if (UseCompressedOops && !wide) {
 801           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 802         } else {
 803 #ifdef _LP64
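               // A 64-bit immediate store is not encodable on x86-64, so
               // materialize NULL in a scratch register first; the movptr
               // below then becomes the faulting instruction for the implicit
               // null check (null_check_here is moved to point at it).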
 804           __ xorptr(rscratch1, rscratch1);
 805           null_check_here = code_offset();
 806           __ movptr(as_Address(addr), rscratch1);
 807 #else
 808           __ movptr(as_Address(addr), NULL_WORD);
 809 #endif
 810         }
 811       } else {
 812         if (is_literal_address(addr)) {
 813           ShouldNotReachHere();
 814           __ movoop(as_Address(addr, noreg), c->as_jobject());
 815         } else {
 816 #ifdef _LP64


 865   if (info != NULL) {
 866     add_debug_info_for_null_check(null_check_here, info);
 867   }
 868 }
 869 
 870 
 871 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 872   assert(src->is_register(), "should not call otherwise");
 873   assert(dest->is_register(), "should not call otherwise");
 874 
 875   // move between cpu-registers
 876   if (dest->is_single_cpu()) {
 877 #ifdef _LP64
 878     if (src->type() == T_LONG) {
 879       // Can do LONG -> OBJECT
 880       move_regs(src->as_register_lo(), dest->as_register());
 881       return;
 882     }
 883 #endif
 884     assert(src->is_single_cpu(), "must match");
 885     if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
 886       __ verify_oop(src->as_register());
 887     }
 888     move_regs(src->as_register(), dest->as_register());
 889 
 890   } else if (dest->is_double_cpu()) {
 891 #ifdef _LP64
 892     if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
 893       // Surprisingly, we can see a move of a long to T_OBJECT here.
 894       __ verify_oop(src->as_register());
 895       move_regs(src->as_register(), dest->as_register_lo());
 896       return;
 897     }
 898 #endif
 899     assert(src->is_double_cpu(), "must match");
 900     Register f_lo = src->as_register_lo();
 901     Register f_hi = src->as_register_hi();
 902     Register t_lo = dest->as_register_lo();
 903     Register t_hi = dest->as_register_hi();
 904 #ifdef _LP64
 905     assert(f_hi == f_lo, "must be same");
 906     assert(t_hi == t_lo, "must be same");
 907     move_regs(f_lo, t_lo);
 908 #else
 909     assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
 910 
 911 
 912     if (f_lo == t_hi && f_hi == t_lo) {


 943     __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
 944   } else if (dest->is_double_xmm()) {
 945     assert(src->is_double_xmm(), "must match");
 946     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 947 
 948     // move between fpu-registers (no instruction necessary because of fpu-stack)
 949   } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
 950     assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
 951     assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
 952   } else {
 953     ShouldNotReachHere();
 954   }
 955 }
 956 
 957 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 958   assert(src->is_register(), "should not call otherwise");
 959   assert(dest->is_stack(), "should not call otherwise");
 960 
 961   if (src->is_single_cpu()) {
 962     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
 963     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
 964       __ verify_oop(src->as_register());
 965       __ movptr (dst, src->as_register());
 966     } else if (type == T_METADATA) {
 967       __ movptr (dst, src->as_register());
 968     } else {
 969       __ movl (dst, src->as_register());
 970     }
 971 
 972   } else if (src->is_double_cpu()) {
 973     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 974     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
 975     __ movptr (dstLO, src->as_register_lo());
 976     NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
 977 
 978   } else if (src->is_single_xmm()) {
 979     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 980     __ movflt(dst_addr, src->as_xmm_float_reg());
 981 
 982   } else if (src->is_double_xmm()) {
 983     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());


 989     if (pop_fpu_stack)     __ fstp_s (dst_addr);
 990     else                   __ fst_s  (dst_addr);
 991 
 992   } else if (src->is_double_fpu()) {
 993     assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
 994     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 995     if (pop_fpu_stack)     __ fstp_d (dst_addr);
 996     else                   __ fst_d  (dst_addr);
 997 
 998   } else {
 999     ShouldNotReachHere();
1000   }
1001 }
1002 
1003 
1004 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
1005   LIR_Address* to_addr = dest->as_address_ptr();
1006   PatchingStub* patch = NULL;
1007   Register compressed_src = rscratch1;
1008 
1009   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1010     __ verify_oop(src->as_register());
1011 #ifdef _LP64
1012     if (UseCompressedOops && !wide) {
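           // Pre-encode the oop into a scratch register so the store below is
           // a plain 32-bit move; if the store will be patched, record the
           // narrow oop in the oop map so GC can relocate it.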
1013       __ movptr(compressed_src, src->as_register());
1014       __ encode_heap_oop(compressed_src);
1015       if (patch_code != lir_patch_none) {
1016         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
1017       }
1018     }
1019 #endif
1020   }
1021 
1022   if (patch_code != lir_patch_none) {
1023     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1024     Address toa = as_Address(to_addr);
1025     assert(toa.disp() != 0, "must have");
1026   }
1027 
1028   int null_check_here = code_offset();
1029   switch (type) {


1034         assert(src->is_single_fpu(), "must be");
1035         assert(src->fpu_regnr() == 0, "argument must be on TOS");
1036         if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
1037         else                    __ fst_s (as_Address(to_addr));
1038       }
1039       break;
1040     }
1041 
1042     case T_DOUBLE: {
1043       if (src->is_double_xmm()) {
1044         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1045       } else {
1046         assert(src->is_double_fpu(), "must be");
1047         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1048         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1049         else                    __ fst_d (as_Address(to_addr));
1050       }
1051       break;
1052     }
1053 
1054     case T_VALUETYPE: // fall through
1055     case T_ARRAY:   // fall through
1056     case T_OBJECT:  // fall through
1057       if (UseCompressedOops && !wide) {
1058         __ movl(as_Address(to_addr), compressed_src);
1059       } else {
1060         __ movptr(as_Address(to_addr), src->as_register());
1061       }
1062       break;
1063     case T_METADATA:
1064       // We get here to store a method pointer to the stack to pass to
1065       // a dtrace runtime call. This can't work on 64 bit with
1066       // compressed klass ptrs: T_METADATA can be a compressed klass
1067       // ptr or a 64 bit method pointer.
1068       LP64_ONLY(ShouldNotReachHere());
1069       __ movptr(as_Address(to_addr), src->as_register());
1070       break;
1071     case T_ADDRESS:
1072       __ movptr(as_Address(to_addr), src->as_register());
1073       break;
1074     case T_INT:


1125       break;
1126 
1127     default:
1128       ShouldNotReachHere();
1129   }
1130   if (info != NULL) {
1131     add_debug_info_for_null_check(null_check_here, info);
1132   }
1133 
1134   if (patch_code != lir_patch_none) {
1135     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1136   }
1137 }
1138 
1139 
1140 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1141   assert(src->is_stack(), "should not call otherwise");
1142   assert(dest->is_register(), "should not call otherwise");
1143 
1144   if (dest->is_single_cpu()) {
1145     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1146       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1147       __ verify_oop(dest->as_register());
1148     } else if (type == T_METADATA) {
1149       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1150     } else {
1151       __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1152     }
1153 
1154   } else if (dest->is_double_cpu()) {
1155     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1156     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1157     __ movptr(dest->as_register_lo(), src_addr_LO);
1158     NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1159 
1160   } else if (dest->is_single_xmm()) {
1161     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1162     __ movflt(dest->as_xmm_float_reg(), src_addr);
1163 
1164   } else if (dest->is_double_xmm()) {
1165     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1166     __ movdbl(dest->as_xmm_double_reg(), src_addr);
1167 
1168   } else if (dest->is_single_fpu()) {
1169     assert(dest->fpu_regnr() == 0, "dest must be TOS");
1170     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1171     __ fld_s(src_addr);
1172 
1173   } else if (dest->is_double_fpu()) {
1174     assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1175     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1176     __ fld_d(src_addr);
1177 
1178   } else {
1179     ShouldNotReachHere();
1180   }
1181 }
1182 
1183 
1184 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1185   if (src->is_single_stack()) {
1186     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
1187       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1188       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1189     } else {
1190 #ifndef _LP64
1191       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1192       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1193 #else
1194       // no pushl on 64 bit
1195       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1196       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1197 #endif
1198     }
1199 
1200   } else if (src->is_double_stack()) {
1201 #ifdef _LP64
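         // A push/pop pair moves the 64-bit stack slot memory-to-memory
         // without needing a scratch register.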
1202     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1203     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1204 #else
1205     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1206     // push and pop the part at src + wordSize, adding wordSize for the previous push
1207     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1208     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1209     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1210 #endif // _LP64
1211 
1212   } else {
1213     ShouldNotReachHere();
1214   }
1215 }
1216 
1217 
1218 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1219   assert(src->is_address(), "should not call otherwise");
1220   assert(dest->is_register(), "should not call otherwise");
1221 
1222   LIR_Address* addr = src->as_address_ptr();
1223   Address from_addr = as_Address(addr);
1224 
1225   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
1226     __ verify_oop(addr->base()->as_pointer_register());
1227   }
1228 
1229   switch (type) {
1230     case T_BOOLEAN: // fall through
1231     case T_BYTE:    // fall through
1232     case T_CHAR:    // fall through
1233     case T_SHORT:
1234       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1235         // on pre-P6 processors we may get partial register stalls,
1236         // so blow away the value of the destination register before loading a
1237         // partial word into it.  Do it here so that it precedes
1238         // the potential patch point below.
1239         __ xorptr(dest->as_register(), dest->as_register());
1240       }
1241       break;
1242     default:
1243       break;
1244   }
1245 


1258         __ movflt(dest->as_xmm_float_reg(), from_addr);
1259       } else {
1260         assert(dest->is_single_fpu(), "must be");
1261         assert(dest->fpu_regnr() == 0, "dest must be TOS");
1262         __ fld_s(from_addr);
1263       }
1264       break;
1265     }
1266 
1267     case T_DOUBLE: {
1268       if (dest->is_double_xmm()) {
1269         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1270       } else {
1271         assert(dest->is_double_fpu(), "must be");
1272         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1273         __ fld_d(from_addr);
1274       }
1275       break;
1276     }
1277 
1278     case T_VALUETYPE: // fall through
1279     case T_OBJECT:  // fall through
1280     case T_ARRAY:   // fall through
1281       if (UseCompressedOops && !wide) {
1282         __ movl(dest->as_register(), from_addr);
1283       } else {
1284         __ movptr(dest->as_register(), from_addr);
1285       }
1286       break;
1287 
1288     case T_ADDRESS:
1289       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1290         __ movl(dest->as_register(), from_addr);
1291       } else {
1292         __ movptr(dest->as_register(), from_addr);
1293       }
1294       break;
1295     case T_INT:
1296       __ movl(dest->as_register(), from_addr);
1297       break;
1298 


1368     case T_SHORT: {
1369       Register dest_reg = dest->as_register();
1370       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1371         __ movswl(dest_reg, from_addr);
1372       } else {
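             // Load the halfword, then sign-extend it manually: shifting left
             // and arithmetic-shifting right by 16 replicates the sign bit
             // through the upper half of the register.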
1373         __ movw(dest_reg, from_addr);
1374         __ shll(dest_reg, 16);
1375         __ sarl(dest_reg, 16);
1376       }
1377       break;
1378     }
1379 
1380     default:
1381       ShouldNotReachHere();
1382   }
1383 
1384   if (patch != NULL) {
1385     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1386   }
1387 
1388   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1389 #ifdef _LP64
1390     if (UseCompressedOops && !wide) {
1391       __ decode_heap_oop(dest->as_register());
1392     }
1393 #endif
1394 
1395     // Load barrier has not yet been applied, so ZGC can't verify the oop here
1396     if (!UseZGC) {
1397       __ verify_oop(dest->as_register());
1398     }
1399   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1400 #ifdef _LP64
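         // Strip the storage-property bits from the loaded klass word: mask
         // them off in the compressed case, or shift them out of the upper
         // bits of the 64-bit klass pointer (note: inferred from the oopDesc
         // masks and bit counts used below).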
1401     if (UseCompressedClassPointers) {
1402       __ andl(dest->as_register(), oopDesc::compressed_klass_mask());
1403       __ decode_klass_not_null(dest->as_register());
1404     } else {
1405       __ shlq(dest->as_register(), oopDesc::storage_props_nof_bits);
1406       __ shrq(dest->as_register(), oopDesc::storage_props_nof_bits);
1407     }
1408 #else
1409     __ andl(dest->as_register(), oopDesc::wide_klass_mask());
1410 #endif
1411   }
1412 }
1413 
1414 
1415 NEEDS_CLEANUP; // This could be static?
1416 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1417   int elem_size = type2aelembytes(type);
1418   switch (elem_size) {
1419     case 1: return Address::times_1;
1420     case 2: return Address::times_2;
1421     case 4: return Address::times_4;
1422     case 8: return Address::times_8;
1423   }
1424   ShouldNotReachHere();
1425   return Address::no_scale;
1426 }
1427 
1428 
1429 void LIR_Assembler::emit_op3(LIR_Op3* op) {


1611     add_debug_info_for_null_check_here(op->stub()->info());
1612     __ cmpb(Address(op->klass()->as_register(),
1613                     InstanceKlass::init_state_offset()),
1614                     InstanceKlass::fully_initialized);
1615     __ jcc(Assembler::notEqual, *op->stub()->entry());
1616   }
1617   __ allocate_object(op->obj()->as_register(),
1618                      op->tmp1()->as_register(),
1619                      op->tmp2()->as_register(),
1620                      op->header_size(),
1621                      op->object_size(),
1622                      op->klass()->as_register(),
1623                      *op->stub()->entry());
1624   __ bind(*op->stub()->continuation());
1625 }
1626 
1627 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1628   Register len =  op->len()->as_register();
1629   LP64_ONLY( __ movslq(len, len); )
1630 
1631   if (UseSlowPath || op->type() == T_VALUETYPE ||
1632       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1633       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1634     __ jmp(*op->stub()->entry());
1635   } else {
1636     Register tmp1 = op->tmp1()->as_register();
1637     Register tmp2 = op->tmp2()->as_register();
1638     Register tmp3 = op->tmp3()->as_register();
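         // Make sure the temps do not alias len, keeping a copy of len in
         // tmp3 otherwise -- presumably so the length survives the allocation
         // path (a hedged reading of the register shuffle below).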
1639     if (len == tmp1) {
1640       tmp1 = tmp3;
1641     } else if (len == tmp2) {
1642       tmp2 = tmp3;
1643     } else if (len == tmp3) {
1644       // everything is ok
1645     } else {
1646       __ mov(tmp3, len);
1647     }
1648     __ allocate_array(op->obj()->as_register(),
1649                       len,
1650                       tmp1,
1651                       tmp2,


1709     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1710   }
1711   Label profile_cast_success, profile_cast_failure;
1712   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1713   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1714 
1715   if (obj == k_RInfo) {
1716     k_RInfo = dst;
1717   } else if (obj == klass_RInfo) {
1718     klass_RInfo = dst;
1719   }
1720   if (k->is_loaded() && !UseCompressedClassPointers) {
1721     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1722   } else {
1723     Rtmp1 = op->tmp3()->as_register();
1724     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1725   }
1726 
1727   assert_different_registers(obj, k_RInfo, klass_RInfo);
1728 
1729   if (op->need_null_check()) {
1730     __ cmpptr(obj, (int32_t)NULL_WORD);
1731     if (op->should_profile()) {
1732       Label not_null;
1733       __ jccb(Assembler::notEqual, not_null);
1734       // Object is null; update MDO and exit
1735       Register mdo  = klass_RInfo;
1736       __ mov_metadata(mdo, md->constant_encoding());
1737       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1738       int header_bits = BitData::null_seen_byte_constant();
1739       __ orb(data_addr, header_bits);
1740       __ jmp(*obj_is_null);
1741       __ bind(not_null);
1742     } else {
1743       __ jcc(Assembler::equal, *obj_is_null);
1744     }
1745   }
1746 
1747   if (!k->is_loaded()) {
1748     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1749   } else {
1750 #ifdef _LP64
1751     __ mov_metadata(k_RInfo, k->constant_encoding());
1752 #endif // _LP64
1753   }
1754   __ verify_oop(obj);
1755 
1756   if (op->fast_check()) {
1757     // get object class
1758     // not a safepoint as obj null check happens earlier
1759 #ifdef _LP64
1760     if (UseCompressedClassPointers) {
1761       __ load_klass(Rtmp1, obj);
1762       __ cmpptr(k_RInfo, Rtmp1);
1763     } else {
1764       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));


1935         __ mov(dst, obj);
1936       }
1937     } else
1938       if (code == lir_instanceof) {
1939         Register obj = op->object()->as_register();
1940         Register dst = op->result_opr()->as_register();
1941         Label success, failure, done;
1942         emit_typecheck_helper(op, &success, &failure, &failure);
1943         __ bind(failure);
1944         __ xorptr(dst, dst);
1945         __ jmpb(done);
1946         __ bind(success);
1947         __ movptr(dst, 1);
1948         __ bind(done);
1949       } else {
1950         ShouldNotReachHere();
1951       }
1952 
1953 }
1954 
1955 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1956   // We are loading/storing an array that *may* be a flattened array (the declared type
1957   // Object[], interface[], or VT?[]). If this array is flattened, take the slow path.
1958 
1959   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
1960   __ testb(op->tmp()->as_register(), ArrayStorageProperties::flattened_value);
1961   __ jcc(Assembler::notZero, *op->stub()->entry());
1962   if (!op->value()->is_illegal()) {
1963     // We are storing into the array.
1964     Label skip;
1965     __ testb(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
1966     __ jcc(Assembler::zero, skip);
1967     // The array is not flattened, but it is null_free. If we are storing
1968     // a null, take the slow path (which will throw NPE).
1969     __ cmpptr(op->value()->as_register(), (int32_t)NULL_WORD);
1970     __ jcc(Assembler::zero, *op->stub()->entry());
1971     __ bind(skip);
1972   }
1973 }
1974 
1975 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1976   // This is called when we use aastore into an array declared as "[LVT;",
1977   // where we know VT is not flattenable (due to ValueArrayElemMaxFlatOops, etc.).
1978   // However, we need to do a NULL check if the actual array is a "[QVT;".
1979 
1980   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
1981   __ testb(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
1982 }
1983 
1984 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1985   Label L_oops_equal;
1986   Label L_oops_not_equal;
1987   Label L_end;
1988 
1989   Register left  = op->left()->as_register();
1990   Register right = op->right()->as_register();
1991 
1992   __ cmpptr(left, right);
1993   __ jcc(Assembler::equal, L_oops_equal);
1994 
1995   // (1) Null check -- if one of the operands is null, the other must not be null (because
1996   //     the two references are not equal), so they are not substitutable.
1997   //     FIXME: do the null check only if the operand is nullable.
1998   {
1999     __ cmpptr(left, (int32_t)NULL_WORD);
2000     __ jcc(Assembler::equal, L_oops_not_equal);
2001 
2002     __ cmpptr(right, (int32_t)NULL_WORD);
2003     __ jcc(Assembler::equal, L_oops_not_equal);
2004   }
2005 
2006   ciKlass* left_klass = op->left_klass();
2007   ciKlass* right_klass = op->right_klass();
2008 
2009   // (2) Value object check -- if either of the operands is not a value object,
2010   //     they are not substitutable. We do this only if we are not sure that the
2011   //     operands are value objects
2012   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
2013       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
2014     Register tmp1  = op->tmp1()->as_register();
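         // tmp1 ends up equal to always_locked_pattern only if both mark
         // words have all of the pattern bits set, i.e. only if both
         // operands are value objects.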
2015     __ movptr(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
2016     __ andl(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2017     __ andl(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2018     __ cmpptr(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
2019     __ jcc(Assembler::notEqual, L_oops_not_equal);
2020   }
2021 
2022   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2023   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
2024     // No need to load klass -- the operands are statically known to be the same value klass.
2025     __ jmp(*op->stub()->entry());
2026   } else {
2027     Register left_klass_op = op->left_klass_op()->as_register();
2028     Register right_klass_op = op->right_klass_op()->as_register();
2029 
2030     if (UseCompressedOops) {
2031       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2032       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2033       __ cmpl(left_klass_op, right_klass_op);
2034     } else {
2035       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2036       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2037       __ cmpptr(left_klass_op, right_klass_op);
2038     }
2039 
2040     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2041     // fall through to L_oops_not_equal
2042   }
2043 
2044   __ bind(L_oops_not_equal);
2045   move(op->not_equal_result(), op->result_opr());
2046   __ jmp(L_end);
2047 
2048   __ bind(L_oops_equal);
2049   move(op->equal_result(), op->result_opr());
2050   __ jmp(L_end);
2051 
2052   // We've returned from the stub. RAX contains 0x0 IFF the two
2053   // operands are not substitutable. (Don't compare against 0x1 in case the
2054   // C compiler is naughty)
2055   __ bind(*op->stub()->continuation());
2056   __ cmpl(rax, 0);
2057   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2058   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2059   // fall-through
2060   __ bind(L_end);
2061 }
2062 
2063 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2064   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
2065     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2066     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2067     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2068     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2069     Register addr = op->addr()->as_register();
2070     __ lock();
2071     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2072 
2073   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
2074     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2075     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2076     Register newval = op->new_value()->as_register();
2077     Register cmpval = op->cmp_value()->as_register();
2078     assert(cmpval == rax, "wrong register");
2079     assert(newval != NULL, "new val must be register");
2080     assert(cmpval != newval, "cmp and new values must be in different registers");
2081     assert(cmpval != addr, "cmp and addr must be in different registers");


2102       __ cmpxchgl(newval, Address(addr, 0));
2103     }
2104 #ifdef _LP64
2105   } else if (op->code() == lir_cas_long) {
2106     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2107     Register newval = op->new_value()->as_register_lo();
2108     Register cmpval = op->cmp_value()->as_register_lo();
2109     assert(cmpval == rax, "wrong register");
2110     assert(newval != NULL, "new val must be register");
2111     assert(cmpval != newval, "cmp and new values must be in different registers");
2112     assert(cmpval != addr, "cmp and addr must be in different registers");
2113     assert(newval != addr, "new value and addr must be in different registers");
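         // lock cmpxchgq compares rax with the memory operand: on a match it
         // stores newval, otherwise it loads the current value into rax; ZF
         // reports success. This is why cmpval is pinned to rax above.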
2114     __ lock();
2115     __ cmpxchgq(newval, Address(addr, 0));
2116 #endif // _LP64
2117   } else {
2118     Unimplemented();
2119   }
2120 }
2121 
2122 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2123   assert(dst->is_cpu_register(), "must be");
2124   assert(dst->type() == src->type(), "must be");
2125 
2126   if (src->is_cpu_register()) {
2127     reg2reg(src, dst);
2128   } else if (src->is_stack()) {
2129     stack2reg(src, dst, dst->type());
2130   } else if (src->is_constant()) {
2131     const2reg(src, dst, lir_patch_none, NULL);
2132   } else {
2133     ShouldNotReachHere();
2134   }
2135 }
2136 
2137 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2138   Assembler::Condition acond, ncond;
2139   switch (condition) {
2140     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2141     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2142     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2143     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2144     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2145     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2146     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2147     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2148     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2149                                 ShouldNotReachHere();
2150   }
2151 
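       // Materialize opr1 into the result first; the elided code below then
       // presumably uses ncond (the negated condition) to cmov opr2 over it
       // when the condition does not hold, which is why each case pairs a
       // condition with its negation.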
2152   if (opr1->is_cpu_register()) {
2153     reg2reg(opr1, result);
2154   } else if (opr1->is_stack()) {
2155     stack2reg(opr1, result, result->type());
2156   } else if (opr1->is_constant()) {


2653       int r_hi = right->as_constant_ptr()->as_jint_hi();
2654       switch (code) {
2655         case lir_logic_and:
2656           __ andl(l_lo, r_lo);
2657           __ andl(l_hi, r_hi);
2658           break;
2659         case lir_logic_or:
2660           __ orl(l_lo, r_lo);
2661           __ orl(l_hi, r_hi);
2662           break;
2663         case lir_logic_xor:
2664           __ xorl(l_lo, r_lo);
2665           __ xorl(l_hi, r_hi);
2666           break;
2667         default: ShouldNotReachHere();
2668       }
2669 #endif // _LP64
2670     } else {
2671 #ifdef _LP64
2672       Register r_lo;
2673       if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) {
2674         r_lo = right->as_register();
2675       } else {
2676         r_lo = right->as_register_lo();
2677       }
2678 #else
2679       Register r_lo = right->as_register_lo();
2680       Register r_hi = right->as_register_hi();
2681       assert(l_lo != r_hi, "overwriting registers");
2682 #endif
2683       switch (code) {
2684         case lir_logic_and:
2685           __ andptr(l_lo, r_lo);
2686           NOT_LP64(__ andptr(l_hi, r_hi);)
2687           break;
2688         case lir_logic_or:
2689           __ orptr(l_lo, r_lo);
2690           NOT_LP64(__ orptr(l_hi, r_hi);)
2691           break;
2692         case lir_logic_xor:
2693           __ xorptr(l_lo, r_lo);


2766     move_regs(lreg, rax);
2767 
2768     int idivl_offset = __ corrected_idivl(rreg);
2769     if (ImplicitDiv0Checks) {
2770       add_debug_info_for_div0(idivl_offset, info);
2771     }
2772     if (code == lir_irem) {
2773       move_regs(rdx, dreg); // result is in rdx
2774     } else {
2775       move_regs(rax, dreg);
2776     }
2777   }
2778 }
2779 
2780 
2781 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2782   if (opr1->is_single_cpu()) {
2783     Register reg1 = opr1->as_register();
2784     if (opr2->is_single_cpu()) {
2785       // cpu register - cpu register
2786       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2787         __ cmpoop(reg1, opr2->as_register());
2788       } else {
2789         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
2790         __ cmpl(reg1, opr2->as_register());
2791       }
2792     } else if (opr2->is_stack()) {
2793       // cpu register - stack
2794       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2795         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2796       } else {
2797         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2798       }
2799     } else if (opr2->is_constant()) {
2800       // cpu register - constant
2801       LIR_Const* c = opr2->as_constant_ptr();
2802       if (c->type() == T_INT) {
2803         __ cmpl(reg1, c->as_jint());
2804       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2805         // In 64 bit, oops occupy a single register.
2806         jobject o = c->as_jobject();
2807         if (o == NULL) {
2808           __ cmpptr(reg1, (int32_t)NULL_WORD);
2809         } else {
2810           __ cmpoop(reg1, o);
2811         }
2812       } else {
2813         fatal("unexpected type: %s", basictype_to_str(c->type()));
2814       }
2815       // cpu register - address
2816     } else if (opr2->is_address()) {
2817       if (op->info() != NULL) {
2818         add_debug_info_for_null_check_here(op->info());
2819       }
2820       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2821     } else {
2822       ShouldNotReachHere();
2823     }
2824 


2884       // xmm register - constant
2885       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2886     } else if (opr2->is_address()) {
2887       // xmm register - address
2888       if (op->info() != NULL) {
2889         add_debug_info_for_null_check_here(op->info());
2890       }
2891       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2892     } else {
2893       ShouldNotReachHere();
2894     }
2895 
2896   } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2897     assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2898     assert(opr2->is_fpu_register(), "both must be registers");
2899     __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2900 
2901   } else if (opr1->is_address() && opr2->is_constant()) {
2902     LIR_Const* c = opr2->as_constant_ptr();
2903 #ifdef _LP64
2904     if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2905       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2906       __ movoop(rscratch1, c->as_jobject());
2907     }
2908 #endif // LP64
2909     if (op->info() != NULL) {
2910       add_debug_info_for_null_check_here(op->info());
2911     }
2912     // special case: address - constant
2913     LIR_Address* addr = opr1->as_address_ptr();
2914     if (c->type() == T_INT) {
2915       __ cmpl(as_Address(addr), c->as_jint());
2916     } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2917 #ifdef _LP64
2918       // %%% Make this explode if addr isn't reachable until we figure out a
2919       // better strategy by giving noreg as the temp for as_Address
2920       __ cmpoop(rscratch1, as_Address(addr, noreg));
2921 #else
2922       __ cmpoop(as_Address(addr), c->as_jobject());
2923 #endif // _LP64
2924     } else {
2925       ShouldNotReachHere();
2926     }
2927 
2928   } else {
2929     ShouldNotReachHere();
2930   }
2931 }
2932 
2933 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2934   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2935     if (left->is_single_xmm()) {
2936       assert(right->is_single_xmm(), "must match");


2975   switch (code) {
2976   case lir_static_call:
2977   case lir_optvirtual_call:
2978   case lir_dynamic_call:
2979     offset += NativeCall::displacement_offset;
2980     break;
2981   case lir_icvirtual_call:
2982     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2983     break;
2984   case lir_virtual_call:  // currently, sparc-specific for niagara
2985   default: ShouldNotReachHere();
2986   }
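       // Align the call site so that the 4-byte displacement computed above
       // does not cross a word boundary; this allows the call target to be
       // patched atomically while other threads may be executing it.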
2987   __ align(BytesPerWord, offset);
2988 }
2989 
2990 
2991 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2992   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2993          "must be aligned");
2994   __ call(AddressLiteral(op->addr(), rtype));
2995   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2996 }
2997 
2998 
2999 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3000   __ ic_call(op->addr());
3001   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3002   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3003          "must be aligned");
3004 }
3005 
3006 
3007 /* Currently, vtable-dispatch is only enabled for sparc platforms */
3008 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
3009   ShouldNotReachHere();
3010 }
3011 
3012 
3013 void LIR_Assembler::emit_static_call_stub() {
3014   address call_pc = __ pc();
3015   address stub = __ start_a_stub(call_stub_size());
3016   if (stub == NULL) {
3017     bailout("static call stub overflow");
3018     return;
3019   }
3020 
3021   int start = __ offset();
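       // The stub body (elided here) presumably holds a metadata load and a
       // jump that get patched when the call site is resolved; start is
       // recorded so the emitted size can be checked against call_stub_size().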


3177   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3178 }
3179 
3180 
3181 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3182   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3183   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3184   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3185   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3186 }
3187 
3188 
3189 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3190   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3191   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3192   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3193   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3194 }
3195 
3196 
3197 void LIR_Assembler::arraycopy_valuetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3198   if (null_check) {
3199     __ testptr(obj, obj);
3200     __ jcc(Assembler::zero, *slow_path->entry());
3201   }
3202   __ load_storage_props(tmp, obj);
3203   if (is_dest) {
3204     // We also take slow path if it's a null_free destination array, just in case the source array
3205     // contains NULLs.
3206     __ testb(tmp, ArrayStorageProperties::flattened_value | ArrayStorageProperties::null_free_value);
3207   } else {
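         // For a source array only a flattened layout forces the slow path;
         // reading from a null-free array is safe.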
3208     __ testb(tmp, ArrayStorageProperties::flattened_value);
3209   }
3210   __ jcc(Assembler::notEqual, *slow_path->entry());
3211 }
3212 
3213 
3214 // This code replaces a call to arraycopy; no exceptions may
3215 // be thrown in this code, they must be thrown in the System.arraycopy
3216 // activation frame. We could save some checks if this were not the case.
3217 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3218   ciArrayKlass* default_type = op->expected_type();
3219   Register src = op->src()->as_register();
3220   Register dst = op->dst()->as_register();
3221   Register src_pos = op->src_pos()->as_register();
3222   Register dst_pos = op->dst_pos()->as_register();
3223   Register length  = op->length()->as_register();
3224   Register tmp = op->tmp()->as_register();
3225 
3226   __ resolve(ACCESS_READ, src);
3227   __ resolve(ACCESS_WRITE, dst);
3228 
3229   CodeStub* stub = op->stub();
3230   int flags = op->flags();
3231   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3232   if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT;
3233 
3234   if (flags & LIR_OpArrayCopy::always_slow_path) {
3235     __ jmp(*stub->entry());
3236     __ bind(*stub->continuation());
3237     return;
3238   }
3239 
3240   if (flags & LIR_OpArrayCopy::src_valuetype_check) {
3241     arraycopy_valuetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3242   }
3243 
3244   if (flags & LIR_OpArrayCopy::dst_valuetype_check) {
3245     arraycopy_valuetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3246   }
3247 
3248   // if we don't know anything, just go through the generic arraycopy
3249   if (default_type == NULL) {
3250     // save outgoing arguments on stack in case call to System.arraycopy is needed
3251     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3252     // for the interpreter calling convention. Now we have to do it in the new-style
3253     // convention. For the moment, until C1 gets the new register allocator, I just
3254     // force all the args to the right place (except the register args) and then, on
3255     // the back side, reload the register args properly if we go down the slow path. Yuck.
3256 
3257     // These are proper for the calling convention
3258     store_parameter(length, 2);
3259     store_parameter(dst_pos, 1);
3260     store_parameter(dst, 0);
3261 
3262     // these are just temporary placements until we need to reload
3263     store_parameter(src_pos, 3);
3264     store_parameter(src, 4);
3265     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3266 

