src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"

  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 
  48 
  49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  50 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  51 // fast versions of NegF/NegD and AbsF/AbsD.
  52 
  53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  55   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  56   // for the 128-bit operands of SSE instructions.
  57   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  58   // Store the value to a 128-bit operand.
  59   operand[0] = lo;
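
The masking trick relies on the caller handing in a buffer with at least 15
bytes of slack, so rounding the address down to a 16-byte boundary always
stays inside the buffer. A minimal standalone sketch of the same idea
(illustrative helper, not HotSpot code):

    #include <cstdint>

    // Align a pointer down to a 16-byte boundary, as double_quadword()
    // does for its over-allocated scratch area.
    static inline int64_t* align16_down(int64_t* adr) {
      return reinterpret_cast<int64_t*>(
          reinterpret_cast<intptr_t>(adr) & ~intptr_t(0xF));
    }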

 178 
 179 void LIR_Assembler::ffree(int i) {
 180   __ ffree(i);
 181 }
 182 #endif // !_LP64
 183 
 184 void LIR_Assembler::breakpoint() {
 185   __ int3();
 186 }
 187 
 188 void LIR_Assembler::push(LIR_Opr opr) {
 189   if (opr->is_single_cpu()) {
 190     __ push_reg(opr->as_register());
 191   } else if (opr->is_double_cpu()) {
 192     NOT_LP64(__ push_reg(opr->as_register_hi()));
 193     __ push_reg(opr->as_register_lo());
 194   } else if (opr->is_stack()) {
 195     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 196   } else if (opr->is_constant()) {
 197     LIR_Const* const_opr = opr->as_constant_ptr();
 198     if (const_opr->type() == T_OBJECT) {
 199       __ push_oop(const_opr->as_jobject(), rscratch1);
 200     } else if (const_opr->type() == T_INT) {
 201       __ push_jint(const_opr->as_jint());
 202     } else {
 203       ShouldNotReachHere();
 204     }
 205 
 206   } else {
 207     ShouldNotReachHere();
 208   }
 209 }
 210 
 211 void LIR_Assembler::pop(LIR_Opr opr) {
 212   if (opr->is_single_cpu()) {
 213     __ pop_reg(opr->as_register());
 214   } else {
 215     ShouldNotReachHere();
 216   }
 217 }
 218 

 462     __ bind(*stub->continuation());
 463   }
 464 
 465   if (compilation()->env()->dtrace_method_probes()) {
 466 #ifdef _LP64
 467     __ mov(rdi, r15_thread);
 468     __ mov_metadata(rsi, method()->constant_encoding());
 469 #else
 470     __ get_thread(rax);
 471     __ movptr(Address(rsp, 0), rax);
 472     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
 473 #endif
 474     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 475   }
 476 
 477   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 478     __ mov(rax, rbx);  // Restore the exception
 479   }
 480 
 481   // remove the activation and dispatch to the unwind handler
 482   __ remove_frame(initial_frame_size_in_bytes());
 483   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 484 
 485   // Emit the slow path assembly
 486   if (stub != NULL) {
 487     stub->emit_code(this);
 488   }
 489 
 490   return offset;
 491 }
 492 
 493 
 494 int LIR_Assembler::emit_deopt_handler() {
  495   // generate code for the deopt handler
 496   address handler_base = __ start_a_stub(deopt_handler_size());
 497   if (handler_base == NULL) {
 498     // not enough space left for the handler
 499     bailout("deopt handler overflow");
 500     return -1;
 501   }
 502 
 503   int offset = code_offset();
 504   InternalAddress here(__ pc());
 505 
 506   __ pushptr(here.addr(), rscratch1);
 507   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 508   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 509   __ end_a_stub();
 510 
 511   return offset;
 512 }
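
The handler pushes its own address and then jumps, so the deopt blob sees
what looks like a return address identifying this deopt site. A hedged
sketch of the emitted shape (operand syntax illustrative, not literal
output):

    // push  <address of this push instruction>   ; pushptr(here.addr())
    // jmp   SharedRuntime::deopt_blob()->unpack()
    //
    // The pushed "return address" lets the unpack blob locate the
    // deoptimizing nmethod and its debug info, as if unpack() had been
    // called from this spot.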
 513 
 514 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 515   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 516   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 517     assert(result->fpu() == 0, "result must already be on TOS");
 518   }
 519 

 520   // Pop the stack before the safepoint code
 521   __ remove_frame(initial_frame_size_in_bytes());
 522 
 523   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 524     __ reserved_stack_check();
 525   }
 526 
 527   // Note: we do not need to round double result; float result has the right precision
 528   // the poll sets the condition code, but no data registers
 529 
 530 #ifdef _LP64
 531   const Register thread = r15_thread;
 532 #else
 533   const Register thread = rbx;
 534   __ get_thread(thread);
 535 #endif
 536   code_stub->set_safepoint_offset(__ offset());
 537   __ relocate(relocInfo::poll_return_type);
 538   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 539   __ ret(0);
 540 }
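
The poll at return reads a per-thread polling page: when the VM arms a
safepoint it protects that page, so the read faults and the signal handler
diverts the thread into the safepoint machinery. A portable sketch of the
idea (struct layout and names assumed):

    #include <cstdint>

    struct ThreadStub { volatile uintptr_t* polling_word; };  // assumed

    inline void poll_at_return(ThreadStub* t) {
      // Mirrors "testl rax, [poll_addr]": a dummy read that is harmless
      // while the page is readable and traps (SIGSEGV -> VM handler)
      // once the safepoint is armed.
      (void)*t->polling_word;
    }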
 541 
 542 
 543 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 544   guarantee(info != NULL, "Shouldn't be NULL");
 545   int offset = __ offset();
 546 #ifdef _LP64
 547   const Register poll_addr = rscratch1;
 548   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 549 #else
 550   assert(tmp->is_cpu_register(), "needed");
 551   const Register poll_addr = tmp->as_register();
 552   __ get_thread(poll_addr);
 553   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 554 #endif
 555   add_debug_info_for_branch(info);
 556   __ relocate(relocInfo::poll_type);
 557   address pre_pc = __ pc();
 558   __ testl(rax, Address(poll_addr, 0));
 559   address post_pc = __ pc();
 560   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 561   return offset;
 562 }

 583       break;
 584     }
 585 
 586     case T_ADDRESS: {
 587       assert(patch_code == lir_patch_none, "no patching handled here");
 588       __ movptr(dest->as_register(), c->as_jint());
 589       break;
 590     }
 591 
 592     case T_LONG: {
 593       assert(patch_code == lir_patch_none, "no patching handled here");
 594 #ifdef _LP64
 595       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 596 #else
 597       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 598       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 599 #endif // _LP64
 600       break;
 601     }
 602 

 603     case T_OBJECT: {
 604       if (patch_code != lir_patch_none) {
 605         jobject2reg_with_patching(dest->as_register(), info);
 606       } else {
 607         __ movoop(dest->as_register(), c->as_jobject());
 608       }
 609       break;
 610     }
 611 
 612     case T_METADATA: {
 613       if (patch_code != lir_patch_none) {
 614         klass2reg_with_patching(dest->as_register(), info);
 615       } else {
 616         __ mov_metadata(dest->as_register(), c->as_metadata());
 617       }
 618       break;
 619     }
 620 
 621     case T_FLOAT: {
 622       if (dest->is_single_xmm()) {

 673     default:
 674       ShouldNotReachHere();
 675   }
 676 }
 677 
 678 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 679   assert(src->is_constant(), "should not call otherwise");
 680   assert(dest->is_stack(), "should not call otherwise");
 681   LIR_Const* c = src->as_constant_ptr();
 682 
 683   switch (c->type()) {
 684     case T_INT:  // fall through
 685     case T_FLOAT:
 686       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 687       break;
 688 
 689     case T_ADDRESS:
 690       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 691       break;
 692 

 693     case T_OBJECT:
 694       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
 695       break;
 696 
 697     case T_LONG:  // fall through
 698     case T_DOUBLE:
 699 #ifdef _LP64
 700       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 701                                               lo_word_offset_in_bytes),
 702                 (intptr_t)c->as_jlong_bits(),
 703                 rscratch1);
 704 #else
 705       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 706                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 707       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 708                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 709 #endif // _LP64
 710       break;
 711 
 712     default:

 714   }
 715 }
 716 
 717 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 718   assert(src->is_constant(), "should not call otherwise");
 719   assert(dest->is_address(), "should not call otherwise");
 720   LIR_Const* c = src->as_constant_ptr();
 721   LIR_Address* addr = dest->as_address_ptr();
 722 
 723   int null_check_here = code_offset();
 724   switch (type) {
 725     case T_INT:    // fall through
 726     case T_FLOAT:
 727       __ movl(as_Address(addr), c->as_jint_bits());
 728       break;
 729 
 730     case T_ADDRESS:
 731       __ movptr(as_Address(addr), c->as_jint_bits());
 732       break;
 733 

 734     case T_OBJECT:  // fall through
 735     case T_ARRAY:
 736       if (c->as_jobject() == NULL) {
 737         if (UseCompressedOops && !wide) {
 738           __ movl(as_Address(addr), NULL_WORD);
 739         } else {
 740 #ifdef _LP64
 741           __ xorptr(rscratch1, rscratch1);
 742           null_check_here = code_offset();
 743           __ movptr(as_Address(addr), rscratch1);
 744 #else
 745           __ movptr(as_Address(addr), NULL_WORD);
 746 #endif
 747         }
 748       } else {
 749         if (is_literal_address(addr)) {
 750           ShouldNotReachHere();
 751           __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
 752         } else {
 753 #ifdef _LP64

 802   if (info != NULL) {
 803     add_debug_info_for_null_check(null_check_here, info);
 804   }
 805 }
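
In the 64-bit uncompressed case above, the null is first materialized in
rscratch1 and then stored, and null_check_here is moved to the store so
that the store itself is the instruction recorded for the implicit null
check. A sketch of the two shapes (encodings assumed, not literal output):

    //   compressed / 32-bit:  movl  [addr], 0      ; one imm32 store
    //   64-bit oop:           xorq  r11, r11       ; rscratch1 = 0
    //                         movq  [addr], r11    ; <- null_check_here:
    //                                              ;    the faulting insn
    //                                              ;    for the null check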
 806 
 807 
 808 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 809   assert(src->is_register(), "should not call otherwise");
 810   assert(dest->is_register(), "should not call otherwise");
 811 
 812   // move between cpu-registers
 813   if (dest->is_single_cpu()) {
 814 #ifdef _LP64
 815     if (src->type() == T_LONG) {
 816       // Can do LONG -> OBJECT
 817       move_regs(src->as_register_lo(), dest->as_register());
 818       return;
 819     }
 820 #endif
 821     assert(src->is_single_cpu(), "must match");
 822     if (src->type() == T_OBJECT) {
 823       __ verify_oop(src->as_register());
 824     }
 825     move_regs(src->as_register(), dest->as_register());
 826 
 827   } else if (dest->is_double_cpu()) {
 828 #ifdef _LP64
 829     if (is_reference_type(src->type())) {
  830       // Surprisingly, we can also see a move of a long to T_OBJECT
 831       __ verify_oop(src->as_register());
 832       move_regs(src->as_register(), dest->as_register_lo());
 833       return;
 834     }
 835 #endif
 836     assert(src->is_double_cpu(), "must match");
 837     Register f_lo = src->as_register_lo();
 838     Register f_hi = src->as_register_hi();
 839     Register t_lo = dest->as_register_lo();
 840     Register t_hi = dest->as_register_hi();
 841 #ifdef _LP64
 842     assert(f_hi == f_lo, "must be same");

 988       break;
 989     }
 990 
 991     case T_DOUBLE: {
 992 #ifdef _LP64
 993       assert(src->is_double_xmm(), "not a double");
 994       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
 995 #else
 996       if (src->is_double_xmm()) {
 997         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
 998       } else {
 999         assert(src->is_double_fpu(), "must be");
1000         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1001         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1002         else                    __ fst_d (as_Address(to_addr));
1003       }
1004 #endif // _LP64
1005       break;
1006     }
1007 

1008     case T_ARRAY:   // fall through
1009     case T_OBJECT:  // fall through
1010       if (UseCompressedOops && !wide) {
1011         __ movl(as_Address(to_addr), compressed_src);
1012       } else {
1013         __ movptr(as_Address(to_addr), src->as_register());
1014       }
1015       break;
1016     case T_METADATA:
1017       // We get here to store a method pointer to the stack to pass to
1018       // a dtrace runtime call. This can't work on 64 bit with
1019       // compressed klass ptrs: T_METADATA can be a compressed klass
1020       // ptr or a 64 bit method pointer.
1021       LP64_ONLY(ShouldNotReachHere());
1022       __ movptr(as_Address(to_addr), src->as_register());
1023       break;
1024     case T_ADDRESS:
1025       __ movptr(as_Address(to_addr), src->as_register());
1026       break;
1027     case T_INT:

1160     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1161     // push and pop the part at src + wordSize, adding wordSize for the previous push
1162     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1163     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1164     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1165 #endif // _LP64
1166 
1167   } else {
1168     ShouldNotReachHere();
1169   }
1170 }
1171 
1172 
1173 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1174   assert(src->is_address(), "should not call otherwise");
1175   assert(dest->is_register(), "should not call otherwise");
1176 
1177   LIR_Address* addr = src->as_address_ptr();
1178   Address from_addr = as_Address(addr);
1179 
1180   if (addr->base()->type() == T_OBJECT) {
1181     __ verify_oop(addr->base()->as_pointer_register());
1182   }
1183 
1184   switch (type) {
1185     case T_BOOLEAN: // fall through
1186     case T_BYTE:    // fall through
1187     case T_CHAR:    // fall through
1188     case T_SHORT:
1189       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1190         // on pre P6 processors we may get partial register stalls
1191         // so blow away the value of to_rinfo before loading a
1192         // partial word into it.  Do it here so that it precedes
1193         // the potential patch point below.
1194         __ xorptr(dest->as_register(), dest->as_register());
1195       }
1196       break;
1197    default:
1198      break;
1199   }
1200 
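
The xorptr above breaks the dependency on the register's old contents
before a sub-word load: on pre-P6 parts, writing only the low byte or word
of a register whose upper bits are still in flight causes a partial
register stall. A sketch of the pattern (assumed shapes, not literal
output):

    //   xorl  eax, eax      ; clear the full register, kill the old dep
    //   mov   al, [mem]     ; sub-word load merges into a known-zero reg
    //
    // The clear is emitted before the potential patch point so that
    // patching never separates the pair.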

1221 #endif // !LP64
1222       }
1223       break;
1224     }
1225 
1226     case T_DOUBLE: {
1227       if (dest->is_double_xmm()) {
1228         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1229       } else {
1230 #ifndef _LP64
1231         assert(dest->is_double_fpu(), "must be");
1232         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1233         __ fld_d(from_addr);
1234 #else
1235         ShouldNotReachHere();
1236 #endif // !LP64
1237       }
1238       break;
1239     }
1240 

1241     case T_OBJECT:  // fall through
1242     case T_ARRAY:   // fall through
1243       if (UseCompressedOops && !wide) {
1244         __ movl(dest->as_register(), from_addr);
1245       } else {
1246         __ movptr(dest->as_register(), from_addr);
1247       }
1248       break;
1249 
1250     case T_ADDRESS:
1251       __ movptr(dest->as_register(), from_addr);
1252       break;
1253     case T_INT:
1254       __ movl(dest->as_register(), from_addr);
1255       break;
1256 
1257     case T_LONG: {
1258       Register to_lo = dest->as_register_lo();
1259       Register to_hi = dest->as_register_hi();
1260 #ifdef _LP64

1597     add_debug_info_for_null_check_here(op->stub()->info());
1598     __ cmpb(Address(op->klass()->as_register(),
1599                     InstanceKlass::init_state_offset()),
1600                     InstanceKlass::fully_initialized);
1601     __ jcc(Assembler::notEqual, *op->stub()->entry());
1602   }
1603   __ allocate_object(op->obj()->as_register(),
1604                      op->tmp1()->as_register(),
1605                      op->tmp2()->as_register(),
1606                      op->header_size(),
1607                      op->object_size(),
1608                      op->klass()->as_register(),
1609                      *op->stub()->entry());
1610   __ bind(*op->stub()->continuation());
1611 }
1612 
1613 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1614   Register len =  op->len()->as_register();
1615   LP64_ONLY( __ movslq(len, len); )
1616 
1617   if (UseSlowPath ||
1618       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1619       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1620     __ jmp(*op->stub()->entry());
1621   } else {
1622     Register tmp1 = op->tmp1()->as_register();
1623     Register tmp2 = op->tmp2()->as_register();
1624     Register tmp3 = op->tmp3()->as_register();
1625     if (len == tmp1) {
1626       tmp1 = tmp3;
1627     } else if (len == tmp2) {
1628       tmp2 = tmp3;
1629     } else if (len == tmp3) {
1630       // everything is ok
1631     } else {
1632       __ mov(tmp3, len);
1633     }
1634     __ allocate_array(op->obj()->as_register(),
1635                       len,
1636                       tmp1,
1637                       tmp2,

1696     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1697   }
1698   Label profile_cast_success, profile_cast_failure;
1699   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1700   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1701 
1702   if (obj == k_RInfo) {
1703     k_RInfo = dst;
1704   } else if (obj == klass_RInfo) {
1705     klass_RInfo = dst;
1706   }
1707   if (k->is_loaded() && !UseCompressedClassPointers) {
1708     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1709   } else {
1710     Rtmp1 = op->tmp3()->as_register();
1711     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1712   }
1713 
1714   assert_different_registers(obj, k_RInfo, klass_RInfo);
1715 
1716   __ cmpptr(obj, NULL_WORD);
1717   if (op->should_profile()) {
1718     Label not_null;
1719     __ jccb(Assembler::notEqual, not_null);
1720     // Object is null; update MDO and exit
1721     Register mdo  = klass_RInfo;
1722     __ mov_metadata(mdo, md->constant_encoding());
1723     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1724     int header_bits = BitData::null_seen_byte_constant();
1725     __ orb(data_addr, header_bits);
1726     __ jmp(*obj_is_null);
1727     __ bind(not_null);
1728   } else {
1729     __ jcc(Assembler::equal, *obj_is_null);
1730   }
1731 
1732   if (!k->is_loaded()) {
1733     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1734   } else {
1735 #ifdef _LP64
1736     __ mov_metadata(k_RInfo, k->constant_encoding());
1737 #endif // _LP64
1738   }
1739   __ verify_oop(obj);
1740 
1741   if (op->fast_check()) {
1742     // get object class
1743     // not a safepoint as obj null check happens earlier
1744 #ifdef _LP64
1745     if (UseCompressedClassPointers) {
1746       __ load_klass(Rtmp1, obj, tmp_load_klass);
1747       __ cmpptr(k_RInfo, Rtmp1);
1748     } else {
1749       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1921         __ mov(dst, obj);
1922       }
1923     } else
1924       if (code == lir_instanceof) {
1925         Register obj = op->object()->as_register();
1926         Register dst = op->result_opr()->as_register();
1927         Label success, failure, done;
1928         emit_typecheck_helper(op, &success, &failure, &failure);
1929         __ bind(failure);
1930         __ xorptr(dst, dst);
1931         __ jmpb(done);
1932         __ bind(success);
1933         __ movptr(dst, 1);
1934         __ bind(done);
1935       } else {
1936         ShouldNotReachHere();
1937       }
1938 
1939 }
1940 
1941 
1942 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1943   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1944     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1945     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1946     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1947     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1948     Register addr = op->addr()->as_register();
1949     __ lock();
1950     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1951 
1952   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1953     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1954     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1955     Register newval = op->new_value()->as_register();
1956     Register cmpval = op->cmp_value()->as_register();
1957     assert(cmpval == rax, "wrong register");
1958     assert(newval != noreg, "new val must be register");
1959     assert(cmpval != newval, "cmp and new values must be in different registers");
1960     assert(cmpval != addr, "cmp and addr must be in different registers");

1981       __ cmpxchgl(newval, Address(addr, 0));
1982     }
1983 #ifdef _LP64
1984   } else if (op->code() == lir_cas_long) {
1985     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1986     Register newval = op->new_value()->as_register_lo();
1987     Register cmpval = op->cmp_value()->as_register_lo();
1988     assert(cmpval == rax, "wrong register");
1989     assert(newval != noreg, "new val must be register");
1990     assert(cmpval != newval, "cmp and new values must be in different registers");
1991     assert(cmpval != addr, "cmp and addr must be in different registers");
1992     assert(newval != addr, "new value and addr must be in different registers");
1993     __ lock();
1994     __ cmpxchgq(newval, Address(addr, 0));
1995 #endif // _LP64
1996   } else {
1997     Unimplemented();
1998   }
1999 }
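
All of the cases funnel into lock cmpxchg with the compare value pinned in
rax, the instruction's implicit accumulator operand. In portable terms the
int case behaves like a strong compare-exchange; a minimal sketch:

    #include <atomic>

    // Sketch of lir_cas_int semantics: cmpxchg compares *word with
    // cmpval (rax), stores newval on a match, and leaves the old value
    // in rax; compare_exchange_strong mirrors that contract.
    bool cas_int(std::atomic<int>& word, int cmpval, int newval) {
      return word.compare_exchange_strong(cmpval, newval);
    }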
2000 
2001 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
2002                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
2003   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
2004 
2005   Assembler::Condition acond, ncond;
2006   switch (condition) {
2007     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2008     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2009     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2010     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2011     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2012     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2013     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2014     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2015     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2016                                 ShouldNotReachHere();
2017   }
2018 
2019   if (opr1->is_cpu_register()) {
2020     reg2reg(opr1, result);
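
Each condition is paired with its negation so the conditional move can be
assembled from both inputs without re-comparing: the "true" input moves
under acond, the "false" input under ncond. A small sketch of the pairing:

    // Illustrative LIR-style conditions and their logical negations.
    enum Cond { EQ, NE, LT, GE, LE, GT, BE, A, AE, B };

    constexpr Cond negate(Cond c) {
      switch (c) {
        case EQ: return NE;  case NE: return EQ;
        case LT: return GE;  case GE: return LT;
        case LE: return GT;  case GT: return LE;
        case BE: return A;   case A:  return BE;  // unsigned pairs
        case AE: return B;   case B:  return AE;
      }
      return EQ;  // unreachable; mirrors the ShouldNotReachHere() default
    }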

2849   int offset = __ offset();
2850   switch (code) {
2851   case lir_static_call:
2852   case lir_optvirtual_call:
2853   case lir_dynamic_call:
2854     offset += NativeCall::displacement_offset;
2855     break;
2856   case lir_icvirtual_call:
2857     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2858     break;
2859   default: ShouldNotReachHere();
2860   }
2861   __ align(BytesPerWord, offset);
2862 }
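
align_call pads the code stream so that the 4-byte displacement of the
upcoming call starts on a word boundary and can therefore be patched with a
single aligned store while other threads may be executing the code. A
sketch of the padding computation (constants assumed: 8-byte words, a call
encoded as one opcode byte plus a 32-bit displacement):

    constexpr int kBytesPerWord       = 8;
    constexpr int kDisplacementOffset = 1;  // disp32 follows the opcode

    // Number of pad bytes to emit before the call instruction.
    inline int nops_before_call(int code_offset) {
      int disp_pos = code_offset + kDisplacementOffset;
      int rem = disp_pos % kBytesPerWord;
      return rem == 0 ? 0 : kBytesPerWord - rem;
    }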
2863 
2864 
2865 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2866   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2867          "must be aligned");
2868   __ call(AddressLiteral(op->addr(), rtype));
2869   add_call_info(code_offset(), op->info());
2870   __ post_call_nop();
2871 }
2872 
2873 
2874 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2875   __ ic_call(op->addr());
2876   add_call_info(code_offset(), op->info());
2877   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2878          "must be aligned");
2879   __ post_call_nop();
2880 }
2881 
2882 
2883 void LIR_Assembler::emit_static_call_stub() {
2884   address call_pc = __ pc();
2885   address stub = __ start_a_stub(call_stub_size());
2886   if (stub == NULL) {
2887     bailout("static call stub overflow");
2888     return;
2889   }
2890 
2891   int start = __ offset();
2892 
2893   // make sure that the displacement word of the call ends up word aligned
2894   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2895   __ relocate(static_stub_Relocation::spec(call_pc));
2896   __ mov_metadata(rbx, (Metadata*)NULL);

3037   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3038 }
3039 
3040 
3041 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3042   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3043   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3044   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3045   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3046 }
3047 
3048 
3049 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3050   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3051   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3052   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3053   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3054 }
3055 
3056 
 3057 // This code replaces a call to arraycopy; no exceptions may be thrown
 3058 // here, they must be thrown in the System.arraycopy activation frame.
 3059 // We could save some checks if this were not the case.
3060 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3061   ciArrayKlass* default_type = op->expected_type();
3062   Register src = op->src()->as_register();
3063   Register dst = op->dst()->as_register();
3064   Register src_pos = op->src_pos()->as_register();
3065   Register dst_pos = op->dst_pos()->as_register();
3066   Register length  = op->length()->as_register();
3067   Register tmp = op->tmp()->as_register();
3068   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3069 
3070   CodeStub* stub = op->stub();
3071   int flags = op->flags();
3072   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3073   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3074 
3075   // if we don't know anything, just go through the generic arraycopy
3076   if (default_type == NULL) {
3077     // save outgoing arguments on stack in case call to System.arraycopy is needed
 3078     // HACK ALERT. This code used to push the parameters in a hardwired fashion
 3079     // for interpreter calling conventions. Now we have to do it in the new style
 3080     // conventions. For the moment, until C1 gets the new register allocator, I just
 3081     // force all the args to the right place (except the register args) and then, on
 3082     // the back side, reload the register args properly if we take the slow path. Yuck
3083 
3084     // These are proper for the calling convention
3085     store_parameter(length, 2);
3086     store_parameter(dst_pos, 1);
3087     store_parameter(dst, 0);
3088 
3089     // these are just temporary placements until we need to reload
3090     store_parameter(src_pos, 3);
3091     store_parameter(src, 4);
3092     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3093 
3094     address copyfunc_addr = StubRoutines::generic_arraycopy();

3148     __ mov(tmp, rax);
3149     __ xorl(tmp, -1);
3150 
3151     // Reload values from the stack so they are where the stub
3152     // expects them.
3153     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3154     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3155     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3156     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3157     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3158 
3159     __ subl(length, tmp);
3160     __ addl(src_pos, tmp);
3161     __ addl(dst_pos, tmp);
3162     __ jmp(*stub->entry());
3163 
3164     __ bind(*stub->continuation());
3165     return;
3166   }
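
The recovery code works because of the stub's return convention (as used
above): the generic arraycopy stub returns 0 when everything was copied,
and ~n when it stopped after copying n elements. xorl(tmp, -1) is a
one-instruction bitwise NOT recovering n, so the arguments can be advanced
past the already-copied prefix before jumping to the slow path. In plain
C++:

    // Sketch of the convention as used above (0 = fully copied).
    inline int copied_elements(int stub_result) {
      return stub_result ^ -1;   // == ~stub_result
    }
    // then: src_pos += n; dst_pos += n; length -= n; take the slow path.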
3167 
3168   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3169 
3170   int elem_size = type2aelembytes(basic_type);
3171   Address::ScaleFactor scale;
3172 
3173   switch (elem_size) {
3174     case 1 :
3175       scale = Address::times_1;
3176       break;
3177     case 2 :
3178       scale = Address::times_2;
3179       break;
3180     case 4 :
3181       scale = Address::times_4;
3182       break;
3183     case 8 :
3184       scale = Address::times_8;
3185       break;
3186     default:
3187       scale = Address::no_scale;

3748         __ jccb(Assembler::zero, next);
3749 #endif
3750         // first time here. Set profile type.
3751         __ movptr(mdo_addr, tmp);
3752       } else {
3753         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3754                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3755 
3756         __ movptr(tmp, mdo_addr);
3757         __ testptr(tmp, TypeEntries::type_unknown);
3758         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3759 
3760         __ orptr(mdo_addr, TypeEntries::type_unknown);
3761       }
3762     }
3763 
3764     __ bind(next);
3765   }
3766 }
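
The profile update above treats the type cell as a tagged word: it holds a
klass pointer plus low tag bits, and "type_unknown" is sticky, so once two
different types have been seen the cell only degrades and never recovers.
A sketch of the idea (tag value assumed for illustration):

    #include <cstdint>

    constexpr uintptr_t type_unknown = 0x2;  // assumed tag bit

    inline void record_unknown_type(uintptr_t* mdo_slot) {
      if (*mdo_slot & type_unknown) return;  // already degraded
      *mdo_slot |= type_unknown;             // sticky: never cleared
    }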
3767 
3768 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3769   Unimplemented();
3770 }
3771 
3772 
3773 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3774   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3775 }
3776 
3777 
3778 void LIR_Assembler::align_backward_branch_target() {
3779   __ align(BytesPerWord);
3780 }
3781 
3782 
3783 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3784   if (left->is_single_cpu()) {
3785     __ negl(left->as_register());
3786     move_regs(left->as_register(), dest->as_register());
3787 

4011 }
4012 
4013 void LIR_Assembler::membar_storeload() {
4014   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4015 }
4016 
4017 void LIR_Assembler::on_spin_wait() {
4018   __ pause ();
4019 }
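
Portable analogues of the two primitives above, as a sketch: a seq_cst
fence is the C++ way to get (at least) a StoreLoad barrier, and _mm_pause
is the intrinsic behind the "pause" spin hint.

    #include <atomic>
    #include <immintrin.h>

    inline void storeload_fence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    inline void spin_pause() {
      _mm_pause();  // the x86 "pause" hint emitted by __ pause()
    }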
4020 
4021 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4022   assert(result_reg->is_register(), "check");
4023 #ifdef _LP64
4024   // __ get_thread(result_reg->as_register_lo());
4025   __ mov(result_reg->as_register(), r15_thread);
4026 #else
4027   __ get_thread(result_reg->as_register());
4028 #endif // _LP64
4029 }
4030 
4031 
4032 void LIR_Assembler::peephole(LIR_List*) {
4033   // do nothing for now
4034 }
4035 
4036 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4037   assert(data == dest, "xchg/xadd uses only 2 operands");
4038 
4039   if (data->type() == T_INT) {
4040     if (code == lir_xadd) {
4041       __ lock();
4042       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4043     } else {
4044       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4045     }
4046   } else if (data->is_oop()) {
4047     assert (code == lir_xchg, "xadd for oops");
4048     Register obj = data->as_register();
4049 #ifdef _LP64
4050     if (UseCompressedOops) {
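
For T_INT the two LIR codes map directly onto x86's locked
read-modify-write instructions; in portable terms (a sketch):

    #include <atomic>

    inline int atomic_int_op(std::atomic<int>& cell, int v, bool is_xadd) {
      return is_xadd ? cell.fetch_add(v)   // lock xaddl: returns old value
                     : cell.exchange(v);   // xchgl: lock is implicit on x86
    }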

src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp (updated version)

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  56 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  58   // for the 128-bit operands of SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value to a 128-bit operand.
  61   operand[0] = lo;

 180 
 181 void LIR_Assembler::ffree(int i) {
 182   __ ffree(i);
 183 }
 184 #endif // !_LP64
 185 
 186 void LIR_Assembler::breakpoint() {
 187   __ int3();
 188 }
 189 
 190 void LIR_Assembler::push(LIR_Opr opr) {
 191   if (opr->is_single_cpu()) {
 192     __ push_reg(opr->as_register());
 193   } else if (opr->is_double_cpu()) {
 194     NOT_LP64(__ push_reg(opr->as_register_hi()));
 195     __ push_reg(opr->as_register_lo());
 196   } else if (opr->is_stack()) {
 197     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 198   } else if (opr->is_constant()) {
 199     LIR_Const* const_opr = opr->as_constant_ptr();
 200     if (const_opr->type() == T_OBJECT || const_opr->type() == T_PRIMITIVE_OBJECT) {
 201       __ push_oop(const_opr->as_jobject(), rscratch1);
 202     } else if (const_opr->type() == T_INT) {
 203       __ push_jint(const_opr->as_jint());
 204     } else {
 205       ShouldNotReachHere();
 206     }
 207 
 208   } else {
 209     ShouldNotReachHere();
 210   }
 211 }
 212 
 213 void LIR_Assembler::pop(LIR_Opr opr) {
 214   if (opr->is_single_cpu()) {
 215     __ pop_reg(opr->as_register());
 216   } else {
 217     ShouldNotReachHere();
 218   }
 219 }
 220 

 464     __ bind(*stub->continuation());
 465   }
 466 
 467   if (compilation()->env()->dtrace_method_probes()) {
 468 #ifdef _LP64
 469     __ mov(rdi, r15_thread);
 470     __ mov_metadata(rsi, method()->constant_encoding());
 471 #else
 472     __ get_thread(rax);
 473     __ movptr(Address(rsp, 0), rax);
 474     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding(), noreg);
 475 #endif
 476     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 477   }
 478 
 479   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 480     __ mov(rax, rbx);  // Restore the exception
 481   }
 482 
 483   // remove the activation and dispatch to the unwind handler
 484   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 485   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 486 
 487   // Emit the slow path assembly
 488   if (stub != NULL) {
 489     stub->emit_code(this);
 490   }
 491 
 492   return offset;
 493 }
 494 
 495 
 496 int LIR_Assembler::emit_deopt_handler() {
  497   // generate code for the deopt handler
 498   address handler_base = __ start_a_stub(deopt_handler_size());
 499   if (handler_base == NULL) {
 500     // not enough space left for the handler
 501     bailout("deopt handler overflow");
 502     return -1;
 503   }
 504 
 505   int offset = code_offset();
 506   InternalAddress here(__ pc());
 507 
 508   __ pushptr(here.addr(), rscratch1);
 509   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 510   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 511   __ end_a_stub();
 512 
 513   return offset;
 514 }
 515 
 516 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 517   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 518   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 519     assert(result->fpu() == 0, "result must already be on TOS");
 520   }
 521 
 522   ciMethod* method = compilation()->method();
 523   if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
 524     ciInlineKlass* vk = method->return_type()->as_inline_klass();
 525     if (vk->can_be_returned_as_fields()) {
 526 #ifndef _LP64
 527       Unimplemented();
 528 #else
 529       address unpack_handler = vk->unpack_handler();
 530       assert(unpack_handler != NULL, "must be");
 531       __ call(RuntimeAddress(unpack_handler));
 532       // At this point, rax points to the value object (for interpreter or C1 caller).
 533       // The fields of the object are copied into registers (for C2 caller).
 534 #endif
 535     }
 536   }
 537 
 538   // Pop the stack before the safepoint code
 539   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 540 
 541   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 542     __ reserved_stack_check();
 543   }
 544 
 545   // Note: we do not need to round double result; float result has the right precision
 546   // the poll sets the condition code, but no data registers
 547 
 548 #ifdef _LP64
 549   const Register thread = r15_thread;
 550 #else
 551   const Register thread = rbx;
 552   __ get_thread(thread);
 553 #endif
 554   code_stub->set_safepoint_offset(__ offset());
 555   __ relocate(relocInfo::poll_return_type);
 556   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 557   __ ret(0);
 558 }
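
When InlineTypeReturnedAsFields is enabled the return carries both shapes
at once: rax holds the buffered oop for interpreted and C1 callers, while
the unpack handler also spreads the fields into the scalarized return
registers for C2 callers. A hypothetical two-field example of what the
scalarized shape amounts to (register assignment illustrative):

    // Conceptually the callee returns { oop, f1, f2 } and each caller
    // picks the representation it understands.
    struct ScalarizedReturn {
      void* buffered_oop;  // rax: for interpreted / C1 callers
      int   f1;            // e.g. first scalarized field (C2 callers)
      long  f2;            // e.g. second scalarized field
    };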
 559 
 560 
 561 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 562   return (__ store_inline_type_fields_to_buf(vk, false));
 563 }
 564 
 565 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 566   guarantee(info != NULL, "Shouldn't be NULL");
 567   int offset = __ offset();
 568 #ifdef _LP64
 569   const Register poll_addr = rscratch1;
 570   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 571 #else
 572   assert(tmp->is_cpu_register(), "needed");
 573   const Register poll_addr = tmp->as_register();
 574   __ get_thread(poll_addr);
 575   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 576 #endif
 577   add_debug_info_for_branch(info);
 578   __ relocate(relocInfo::poll_type);
 579   address pre_pc = __ pc();
 580   __ testl(rax, Address(poll_addr, 0));
 581   address post_pc = __ pc();
 582   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 583   return offset;
 584 }

 605       break;
 606     }
 607 
 608     case T_ADDRESS: {
 609       assert(patch_code == lir_patch_none, "no patching handled here");
 610       __ movptr(dest->as_register(), c->as_jint());
 611       break;
 612     }
 613 
 614     case T_LONG: {
 615       assert(patch_code == lir_patch_none, "no patching handled here");
 616 #ifdef _LP64
 617       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 618 #else
 619       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 620       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 621 #endif // _LP64
 622       break;
 623     }
 624 
 625     case T_PRIMITIVE_OBJECT: // Fall through
 626     case T_OBJECT: {
 627       if (patch_code != lir_patch_none) {
 628         jobject2reg_with_patching(dest->as_register(), info);
 629       } else {
 630         __ movoop(dest->as_register(), c->as_jobject());
 631       }
 632       break;
 633     }
 634 
 635     case T_METADATA: {
 636       if (patch_code != lir_patch_none) {
 637         klass2reg_with_patching(dest->as_register(), info);
 638       } else {
 639         __ mov_metadata(dest->as_register(), c->as_metadata());
 640       }
 641       break;
 642     }
 643 
 644     case T_FLOAT: {
 645       if (dest->is_single_xmm()) {

 696     default:
 697       ShouldNotReachHere();
 698   }
 699 }
 700 
 701 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 702   assert(src->is_constant(), "should not call otherwise");
 703   assert(dest->is_stack(), "should not call otherwise");
 704   LIR_Const* c = src->as_constant_ptr();
 705 
 706   switch (c->type()) {
 707     case T_INT:  // fall through
 708     case T_FLOAT:
 709       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 710       break;
 711 
 712     case T_ADDRESS:
 713       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 714       break;
 715 
 716     case T_PRIMITIVE_OBJECT: // Fall through
 717     case T_OBJECT:
 718       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1);
 719       break;
 720 
 721     case T_LONG:  // fall through
 722     case T_DOUBLE:
 723 #ifdef _LP64
 724       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 725                                               lo_word_offset_in_bytes),
 726                 (intptr_t)c->as_jlong_bits(),
 727                 rscratch1);
 728 #else
 729       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 730                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 731       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 732                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 733 #endif // _LP64
 734       break;
 735 
 736     default:

 738   }
 739 }
 740 
 741 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 742   assert(src->is_constant(), "should not call otherwise");
 743   assert(dest->is_address(), "should not call otherwise");
 744   LIR_Const* c = src->as_constant_ptr();
 745   LIR_Address* addr = dest->as_address_ptr();
 746 
 747   int null_check_here = code_offset();
 748   switch (type) {
 749     case T_INT:    // fall through
 750     case T_FLOAT:
 751       __ movl(as_Address(addr), c->as_jint_bits());
 752       break;
 753 
 754     case T_ADDRESS:
 755       __ movptr(as_Address(addr), c->as_jint_bits());
 756       break;
 757 
 758     case T_PRIMITIVE_OBJECT: // fall through
 759     case T_OBJECT:  // fall through
 760     case T_ARRAY:
 761       if (c->as_jobject() == NULL) {
 762         if (UseCompressedOops && !wide) {
 763           __ movl(as_Address(addr), NULL_WORD);
 764         } else {
 765 #ifdef _LP64
 766           __ xorptr(rscratch1, rscratch1);
 767           null_check_here = code_offset();
 768           __ movptr(as_Address(addr), rscratch1);
 769 #else
 770           __ movptr(as_Address(addr), NULL_WORD);
 771 #endif
 772         }
 773       } else {
 774         if (is_literal_address(addr)) {
 775           ShouldNotReachHere();
 776           __ movoop(as_Address(addr, noreg), c->as_jobject(), rscratch1);
 777         } else {
 778 #ifdef _LP64

 827   if (info != NULL) {
 828     add_debug_info_for_null_check(null_check_here, info);
 829   }
 830 }
 831 
 832 
 833 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 834   assert(src->is_register(), "should not call otherwise");
 835   assert(dest->is_register(), "should not call otherwise");
 836 
 837   // move between cpu-registers
 838   if (dest->is_single_cpu()) {
 839 #ifdef _LP64
 840     if (src->type() == T_LONG) {
 841       // Can do LONG -> OBJECT
 842       move_regs(src->as_register_lo(), dest->as_register());
 843       return;
 844     }
 845 #endif
 846     assert(src->is_single_cpu(), "must match");
 847     if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
 848       __ verify_oop(src->as_register());
 849     }
 850     move_regs(src->as_register(), dest->as_register());
 851 
 852   } else if (dest->is_double_cpu()) {
 853 #ifdef _LP64
 854     if (is_reference_type(src->type())) {
  855       // Surprisingly, we can also see a move of a long to T_OBJECT
 856       __ verify_oop(src->as_register());
 857       move_regs(src->as_register(), dest->as_register_lo());
 858       return;
 859     }
 860 #endif
 861     assert(src->is_double_cpu(), "must match");
 862     Register f_lo = src->as_register_lo();
 863     Register f_hi = src->as_register_hi();
 864     Register t_lo = dest->as_register_lo();
 865     Register t_hi = dest->as_register_hi();
 866 #ifdef _LP64
 867     assert(f_hi == f_lo, "must be same");

1013       break;
1014     }
1015 
1016     case T_DOUBLE: {
1017 #ifdef _LP64
1018       assert(src->is_double_xmm(), "not a double");
1019       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1020 #else
1021       if (src->is_double_xmm()) {
1022         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1023       } else {
1024         assert(src->is_double_fpu(), "must be");
1025         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1026         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1027         else                    __ fst_d (as_Address(to_addr));
1028       }
1029 #endif // _LP64
1030       break;
1031     }
1032 
1033     case T_PRIMITIVE_OBJECT: // fall through
1034     case T_ARRAY:   // fall through
1035     case T_OBJECT:  // fall through
1036       if (UseCompressedOops && !wide) {
1037         __ movl(as_Address(to_addr), compressed_src);
1038       } else {
1039         __ movptr(as_Address(to_addr), src->as_register());
1040       }
1041       break;
1042     case T_METADATA:
1043       // We get here to store a method pointer to the stack to pass to
1044       // a dtrace runtime call. This can't work on 64 bit with
1045       // compressed klass ptrs: T_METADATA can be a compressed klass
1046       // ptr or a 64 bit method pointer.
1047       LP64_ONLY(ShouldNotReachHere());
1048       __ movptr(as_Address(to_addr), src->as_register());
1049       break;
1050     case T_ADDRESS:
1051       __ movptr(as_Address(to_addr), src->as_register());
1052       break;
1053     case T_INT:

1186     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1187     // push and pop the part at src + wordSize, adding wordSize for the previous push
1188     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1189     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1190     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1191 #endif // _LP64
1192 
1193   } else {
1194     ShouldNotReachHere();
1195   }
1196 }
1197 
1198 
1199 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1200   assert(src->is_address(), "should not call otherwise");
1201   assert(dest->is_register(), "should not call otherwise");
1202 
1203   LIR_Address* addr = src->as_address_ptr();
1204   Address from_addr = as_Address(addr);
1205 
1206   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
1207     __ verify_oop(addr->base()->as_pointer_register());
1208   }
1209 
1210   switch (type) {
1211     case T_BOOLEAN: // fall through
1212     case T_BYTE:    // fall through
1213     case T_CHAR:    // fall through
1214     case T_SHORT:
1215       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1216         // on pre P6 processors we may get partial register stalls
1217         // so blow away the value of to_rinfo before loading a
1218         // partial word into it.  Do it here so that it precedes
1219         // the potential patch point below.
1220         __ xorptr(dest->as_register(), dest->as_register());
1221       }
1222       break;
1223    default:
1224      break;
1225   }
1226 

1247 #endif // !LP64
1248       }
1249       break;
1250     }
1251 
1252     case T_DOUBLE: {
1253       if (dest->is_double_xmm()) {
1254         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1255       } else {
1256 #ifndef _LP64
1257         assert(dest->is_double_fpu(), "must be");
1258         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1259         __ fld_d(from_addr);
1260 #else
1261         ShouldNotReachHere();
1262 #endif // !LP64
1263       }
1264       break;
1265     }
1266 
1267     case T_PRIMITIVE_OBJECT: // fall through
1268     case T_OBJECT:  // fall through
1269     case T_ARRAY:   // fall through
1270       if (UseCompressedOops && !wide) {
1271         __ movl(dest->as_register(), from_addr);
1272       } else {
1273         __ movptr(dest->as_register(), from_addr);
1274       }
1275       break;
1276 
1277     case T_ADDRESS:
1278       __ movptr(dest->as_register(), from_addr);
1279       break;
1280     case T_INT:
1281       __ movl(dest->as_register(), from_addr);
1282       break;
1283 
1284     case T_LONG: {
1285       Register to_lo = dest->as_register_lo();
1286       Register to_hi = dest->as_register_hi();
1287 #ifdef _LP64

1624     add_debug_info_for_null_check_here(op->stub()->info());
1625     __ cmpb(Address(op->klass()->as_register(),
1626                     InstanceKlass::init_state_offset()),
1627                     InstanceKlass::fully_initialized);
1628     __ jcc(Assembler::notEqual, *op->stub()->entry());
1629   }
1630   __ allocate_object(op->obj()->as_register(),
1631                      op->tmp1()->as_register(),
1632                      op->tmp2()->as_register(),
1633                      op->header_size(),
1634                      op->object_size(),
1635                      op->klass()->as_register(),
1636                      *op->stub()->entry());
1637   __ bind(*op->stub()->continuation());
1638 }
1639 
1640 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1641   Register len =  op->len()->as_register();
1642   LP64_ONLY( __ movslq(len, len); )
1643 
1644   if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
1645       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1646       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1647     __ jmp(*op->stub()->entry());
1648   } else {
1649     Register tmp1 = op->tmp1()->as_register();
1650     Register tmp2 = op->tmp2()->as_register();
1651     Register tmp3 = op->tmp3()->as_register();
1652     if (len == tmp1) {
1653       tmp1 = tmp3;
1654     } else if (len == tmp2) {
1655       tmp2 = tmp3;
1656     } else if (len == tmp3) {
1657       // everything is ok
1658     } else {
1659       __ mov(tmp3, len);
1660     }
1661     __ allocate_array(op->obj()->as_register(),
1662                       len,
1663                       tmp1,
1664                       tmp2,

1723     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1724   }
1725   Label profile_cast_success, profile_cast_failure;
1726   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1727   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1728 
1729   if (obj == k_RInfo) {
1730     k_RInfo = dst;
1731   } else if (obj == klass_RInfo) {
1732     klass_RInfo = dst;
1733   }
1734   if (k->is_loaded() && !UseCompressedClassPointers) {
1735     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1736   } else {
1737     Rtmp1 = op->tmp3()->as_register();
1738     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1739   }
1740 
1741   assert_different_registers(obj, k_RInfo, klass_RInfo);
1742 
1743   if (op->need_null_check()) {
1744     __ cmpptr(obj, NULL_WORD);
1745     if (op->should_profile()) {
1746       Label not_null;
1747       __ jccb(Assembler::notEqual, not_null);
1748       // Object is null; update MDO and exit
1749       Register mdo  = klass_RInfo;
1750       __ mov_metadata(mdo, md->constant_encoding());
1751       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1752       int header_bits = BitData::null_seen_byte_constant();
1753       __ orb(data_addr, header_bits);
1754       __ jmp(*obj_is_null);
1755       __ bind(not_null);
1756     } else {
1757       __ jcc(Assembler::equal, *obj_is_null);
1758     }
1759   }
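    // Note on the null path above: profiling a null receiver costs a
    // single orb, because the MDO keeps a flags byte for this bytecode
    // and "null_seen" is one sticky bit in it -- e.g. (sketch, bit value
    // assumed):  data->flags |= BitData::null_seen;  // idempotent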
1760 
1761   if (!k->is_loaded()) {
1762     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1763   } else {
1764 #ifdef _LP64
1765     __ mov_metadata(k_RInfo, k->constant_encoding());
1766 #endif // _LP64
1767   }
1768   __ verify_oop(obj);
1769 
1770   if (op->fast_check()) {
1771     // get object class
1772     // not a safepoint as obj null check happens earlier
1773 #ifdef _LP64
1774     if (UseCompressedClassPointers) {
1775       __ load_klass(Rtmp1, obj, tmp_load_klass);
1776       __ cmpptr(k_RInfo, Rtmp1);
1777     } else {
1778       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1950         __ mov(dst, obj);
1951       }
1952     } else
1953       if (code == lir_instanceof) {
1954         Register obj = op->object()->as_register();
1955         Register dst = op->result_opr()->as_register();
1956         Label success, failure, done;
1957         emit_typecheck_helper(op, &success, &failure, &failure);
1958         __ bind(failure);
1959         __ xorptr(dst, dst);
1960         __ jmpb(done);
1961         __ bind(success);
1962         __ movptr(dst, 1);
1963         __ bind(done);
1964       } else {
1965         ShouldNotReachHere();
1966       }
1967 
1968 }
1969 
1970 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1971   // We are loading/storing from/to an array that *may* be flattened (the
1972   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1973   // If this array is flattened, take the slow path.
1974   Register klass = op->tmp()->as_register();
1975   if (UseArrayMarkWordCheck) {
1976     __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1977   } else {
1978     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1979     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
1980     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
1981     __ testl(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
1982     __ jcc(Assembler::notZero, *op->stub()->entry());
1983   }
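  // Rough sketch of the layout_helper path above (not emitted code): the
  // flat-storage bit sits in the array tag of the klass' layout_helper.
  //   if (array->klass()->layout_helper() & Klass::_lh_array_tag_flat_value_bit_inplace)
  //     goto slow_path;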
1984   if (!op->value()->is_illegal()) {
1985     // The array is not flattened, but it might be null-free. If we are storing
1986     // a null into a null-free array, take the slow path (which will throw NPE).
1987     Label skip;
1988     __ cmpptr(op->value()->as_register(), NULL_WORD);
1989     __ jcc(Assembler::notEqual, skip);
1990     if (UseArrayMarkWordCheck) {
1991       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1992     } else {
1993       __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
1994       __ jcc(Assembler::notZero, *op->stub()->entry());
1995     }
1996     __ bind(skip);
1997   }
1998 }
1999 
2000 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
2001   // We are storing into an array that *may* be null-free (the declared type is
2002   // Object[], abstract[], interface[] or VT.ref[]).
2003   if (UseArrayMarkWordCheck) {
2004     Label test_mark_word;
2005     Register tmp = op->tmp()->as_register();
2006     __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
2007     __ testl(tmp, markWord::unlocked_value);
2008     __ jccb(Assembler::notZero, test_mark_word);
2009     __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
2010     __ bind(test_mark_word);
2011     __ testl(tmp, markWord::null_free_array_bit_in_place);
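    // If the mark word was displaced (unlocked bit clear), tmp was reloaded
    // from the klass' prototype header above, which should still carry the
    // null-free-array bit; the flags set here are consumed by the caller.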
2012   } else {
2013     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
2014     Register klass = op->tmp()->as_register();
2015     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
2016     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
2017     __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
2018   }
2019 }
2020 
2021 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
2022   Label L_oops_equal;
2023   Label L_oops_not_equal;
2024   Label L_end;
2025 
2026   Register left  = op->left()->as_register();
2027   Register right = op->right()->as_register();
2028 
2029   __ cmpptr(left, right);
2030   __ jcc(Assembler::equal, L_oops_equal);
2031 
2032   // (1) Null check -- if one of the operands is null, the other must not be null (because
2033   //     the two references are not equal), so they are not substitutable.
2034   //     FIXME: do the null check only if the operand is nullable.
2035   __ testptr(left, right);
2036   __ jcc(Assembler::zero, L_oops_not_equal);
2037 
2038   ciKlass* left_klass = op->left_klass();
2039   ciKlass* right_klass = op->right_klass();
2040 
2041   // (2) Inline type check -- if either of the operands is not an inline type,
2042   //     they are not substitutable. We do this only if we are not sure that both
2043   //     operands are inline types.
2044   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
2045       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2046     Register tmp1  = op->tmp1()->as_register();
2047     __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2048     __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2049     __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2050     __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2051     __ jcc(Assembler::notEqual, L_oops_not_equal);
2052   }
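  // Sketch of the mark-word trick above (not additional emitted code): both
  // mark words are AND'ed into tmp1, so tmp1 still equals the pattern
  // afterwards only if *both* operands carry the inline_type_pattern bits:
  //   tmp1 = inline_type_pattern & left->mark() & right->mark();
  //   if (tmp1 != inline_type_pattern) goto L_oops_not_equal;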
2053 
2054   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2055   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
2056     // No need to load klass -- the operands are statically known to be the same inline klass.
2057     __ jmp(*op->stub()->entry());
2058   } else {
2059     Register left_klass_op = op->left_klass_op()->as_register();
2060     Register right_klass_op = op->right_klass_op()->as_register();
2061 
2062     if (UseCompressedClassPointers) {
2063       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2064       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2065       __ cmpl(left_klass_op, right_klass_op);
2066     } else {
2067       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2068       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2069       __ cmpptr(left_klass_op, right_klass_op);
2070     }
2071 
2072     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2073     // fall through to L_oops_not_equal
2074   }
2075 
2076   __ bind(L_oops_not_equal);
2077   move(op->not_equal_result(), op->result_opr());
2078   __ jmp(L_end);
2079 
2080   __ bind(L_oops_equal);
2081   move(op->equal_result(), op->result_opr());
2082   __ jmp(L_end);
2083 
2084   // We've returned from the stub. RAX contains 0x0 IFF the two
2085   // operands are not substitutable. (Don't compare against 0x1, in case
2086   // the C compiler returns some other non-zero value for true.)
2087   __ bind(*op->stub()->continuation());
2088   __ cmpl(rax, 0);
2089   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2090   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2091   // fall-through
2092   __ bind(L_end);
2093 }
2094 
2095 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2096   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
2097     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2098     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2099     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2100     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2101     Register addr = op->addr()->as_register();
2102     __ lock();
2103     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2104 
2105   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2106     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2107     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2108     Register newval = op->new_value()->as_register();
2109     Register cmpval = op->cmp_value()->as_register();
2110     assert(cmpval == rax, "wrong register");
2111     assert(newval != noreg, "new val must be register");
2112     assert(cmpval != newval, "cmp and new values must be in different registers");
2113     assert(cmpval != addr, "cmp and addr must be in different registers");

2134       __ cmpxchgl(newval, Address(addr, 0));
2135     }
2136 #ifdef _LP64
2137   } else if (op->code() == lir_cas_long) {
2138     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2139     Register newval = op->new_value()->as_register_lo();
2140     Register cmpval = op->cmp_value()->as_register_lo();
2141     assert(cmpval == rax, "wrong register");
2142     assert(newval != noreg, "new val must be register");
2143     assert(cmpval != newval, "cmp and new values must be in different registers");
2144     assert(cmpval != addr, "cmp and addr must be in different registers");
2145     assert(newval != addr, "new value and addr must be in different registers");
2146     __ lock();
2147     __ cmpxchgq(newval, Address(addr, 0));
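    // lock cmpxchgq semantics, roughly:
    //   if (*addr == rax) { *addr = newval; ZF = 1; } else { rax = *addr; ZF = 0; }
    // The LIR consumer of lir_cas_long typically reads the result from ZF.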
2148 #endif // _LP64
2149   } else {
2150     Unimplemented();
2151   }
2152 }
2153 
2154 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2155   assert(dst->is_cpu_register(), "must be");
2156   assert(dst->type() == src->type(), "must be");
2157 
2158   if (src->is_cpu_register()) {
2159     reg2reg(src, dst);
2160   } else if (src->is_stack()) {
2161     stack2reg(src, dst, dst->type());
2162   } else if (src->is_constant()) {
2163     const2reg(src, dst, lir_patch_none, NULL);
2164   } else {
2165     ShouldNotReachHere();
2166   }
2167 }
2168 
2169 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type,
2170                           LIR_Opr cmp_opr1, LIR_Opr cmp_opr2) {
2171   assert(cmp_opr1 == LIR_OprFact::illegalOpr && cmp_opr2 == LIR_OprFact::illegalOpr, "unnecessary cmp oprs on x86");
2172 
2173   Assembler::Condition acond, ncond;
2174   switch (condition) {
2175     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2176     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2177     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2178     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2179     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2180     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2181     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2182     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2183     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2184                                 ShouldNotReachHere();
2185   }
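  // ncond (the negation of acond) is computed because the code below first
  // moves opr1 into result and then conditionally replaces it with opr2 under
  // ncond; acond is used where an explicit branch sequence is needed instead.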
2186 
2187   if (opr1->is_cpu_register()) {
2188     reg2reg(opr1, result);

3017   int offset = __ offset();
3018   switch (code) {
3019   case lir_static_call:
3020   case lir_optvirtual_call:
3021   case lir_dynamic_call:
3022     offset += NativeCall::displacement_offset;
3023     break;
3024   case lir_icvirtual_call:
3025     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
3026     break;
3027   default: ShouldNotReachHere();
3028   }
3029   __ align(BytesPerWord, offset);
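  // The displacement word must be patchable while other threads may be
  // executing the call, so it is aligned to avoid a torn cross-word update
  // (see NativeCall and the matching asserts in call()/ic_call() below).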
3030 }
3031 
3032 
3033 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
3034   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
3035          "must be aligned");
3036   __ call(AddressLiteral(op->addr(), rtype));
3037   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3038   __ post_call_nop();
3039 }
3040 
3041 
3042 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3043   __ ic_call(op->addr());
3044   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3045   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3046          "must be aligned");
3047   __ post_call_nop();
3048 }
3049 
3050 
3051 void LIR_Assembler::emit_static_call_stub() {
3052   address call_pc = __ pc();
3053   address stub = __ start_a_stub(call_stub_size());
3054   if (stub == NULL) {
3055     bailout("static call stub overflow");
3056     return;
3057   }
3058 
3059   int start = __ offset();
3060 
3061   // make sure that the displacement word of the call ends up word aligned
3062   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
3063   __ relocate(static_stub_Relocation::spec(call_pc));
3064   __ mov_metadata(rbx, (Metadata*)NULL);

3205   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3206 }
3207 
3208 
3209 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
3210   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3211   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3212   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3213   __ movoop(Address(rsp, offset_from_rsp_in_bytes), o, rscratch1);
3214 }
3215 
3216 
3217 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3218   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3219   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3220   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3221   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
3222 }
3223 
3224 
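// Helper for emit_arraycopy below: a *source* array forces the slow path if it
// is flat (elements must be buffered on read), a *destination* array if it is
// null-free (the copy might try to store nulls into it).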
3225 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3226   if (null_check) {
3227     __ testptr(obj, obj);
3228     __ jcc(Assembler::zero, *slow_path->entry());
3229   }
3230   if (UseArrayMarkWordCheck) {
3231     if (is_dest) {
3232       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
3233     } else {
3234       __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
3235     }
3236   } else {
3237     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3238     __ load_klass(tmp, obj, tmp_load_klass);
3239     __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
3240     if (is_dest) {
3241       // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
3242       __ testl(tmp, Klass::_lh_null_free_array_bit_inplace);
3243     } else {
3244       __ testl(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
3245     }
3246     __ jcc(Assembler::notZero, *slow_path->entry());
3247   }
3248 }
3249 
3250 
3251 // This code replaces a call to arraycopy; no exceptions may
3252 // be thrown in this code, they must be thrown in the System.arraycopy
3253 // activation frame. We could save some checks if this were not the case.
3254 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3255   ciArrayKlass* default_type = op->expected_type();
3256   Register src = op->src()->as_register();
3257   Register dst = op->dst()->as_register();
3258   Register src_pos = op->src_pos()->as_register();
3259   Register dst_pos = op->dst_pos()->as_register();
3260   Register length  = op->length()->as_register();
3261   Register tmp = op->tmp()->as_register();
3262   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3263 
3264   CodeStub* stub = op->stub();
3265   int flags = op->flags();
3266   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3267   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3268 
3269   if (flags & LIR_OpArrayCopy::always_slow_path) {
3270     __ jmp(*stub->entry());
3271     __ bind(*stub->continuation());
3272     return;
3273   }
3274 
3275   // if we don't know anything, just go through the generic arraycopy
3276   if (default_type == NULL) {
3277     // save outgoing arguments on stack in case call to System.arraycopy is needed
3278     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3279     // for interpreter calling conventions. Now we have to do it in the new style conventions.
3280     // For the moment, until C1 gets the new register allocator, I just force all the
3281     // args to the right place (except the register args) and then on the back side
3282     // reload the register args properly if we go down the slow path. Yuck!
3283 
3284     // These are proper for the calling convention
3285     store_parameter(length, 2);
3286     store_parameter(dst_pos, 1);
3287     store_parameter(dst, 0);
3288 
3289     // These are just temporary placements until we need to reload
3290     store_parameter(src_pos, 3);
3291     store_parameter(src, 4);
3292     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3293 
3294     address copyfunc_addr = StubRoutines::generic_arraycopy();

3348     __ mov(tmp, rax);
3349     __ xorl(tmp, -1);
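    // The generic stub is expected to return 0 on success, or ~K after copying
    // K elements and failing; tmp = rax ^ -1 therefore recovers K, and the
    // positions/length are advanced by K before jumping to the slow path.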
3350 
3351     // Reload values from the stack so they are where the stub
3352     // expects them.
3353     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3354     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3355     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3356     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3357     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3358 
3359     __ subl(length, tmp);
3360     __ addl(src_pos, tmp);
3361     __ addl(dst_pos, tmp);
3362     __ jmp(*stub->entry());
3363 
3364     __ bind(*stub->continuation());
3365     return;
3366   }
3367 
3368   // Handle inline type arrays
3369   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
3370     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3371   }
3372   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
3373     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3374   }
3375 
3376   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3377 
3378   int elem_size = type2aelembytes(basic_type);
3379   Address::ScaleFactor scale;
3380 
3381   switch (elem_size) {
3382     case 1 :
3383       scale = Address::times_1;
3384       break;
3385     case 2 :
3386       scale = Address::times_2;
3387       break;
3388     case 4 :
3389       scale = Address::times_4;
3390       break;
3391     case 8 :
3392       scale = Address::times_8;
3393       break;
3394     default:
3395       scale = Address::no_scale;

3956         __ jccb(Assembler::zero, next);
3957 #endif
3958         // first time here. Set profile type.
3959         __ movptr(mdo_addr, tmp);
3960       } else {
3961         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3962                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3963 
3964         __ movptr(tmp, mdo_addr);
3965         __ testptr(tmp, TypeEntries::type_unknown);
3966         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3967 
3968         __ orptr(mdo_addr, TypeEntries::type_unknown);
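        // The profile cell holds a klass pointer with flag bits in its low
        // bits; OR'ing in type_unknown saturates the cell once conflicting
        // klasses have been seen, so it is never downgraded again.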
3969       }
3970     }
3971 
3972     __ bind(next);
3973   }
3974 }
3975 
3976 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3977   Register obj = op->obj()->as_register();
3978   Register tmp = op->tmp()->as_pointer_register();
3979   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3980   bool not_null = op->not_null();
3981   int flag = op->flag();
3982 
3983   Label not_inline_type;
3984   if (!not_null) {
3985     __ testptr(obj, obj);
3986     __ jccb(Assembler::zero, not_inline_type);
3987   }
3988 
3989   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3990 
3991   __ orb(mdo_addr, flag);
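  // Net effect of the sequence above, as pseudo-code:
  //   if (obj != NULL && obj is an inline type) *mdo_addr |= flag;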
3992 
3993   __ bind(not_inline_type);
3994 }
3995 
3996 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3997   Unimplemented();
3998 }
3999 
4000 
4001 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
4002   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
4003 }
4004 
4005 
4006 void LIR_Assembler::align_backward_branch_target() {
4007   __ align(BytesPerWord);
4008 }
4009 
4010 
4011 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
4012   if (left->is_single_cpu()) {
4013     __ negl(left->as_register());
4014     move_regs(left->as_register(), dest->as_register());
4015 

4239 }
4240 
4241 void LIR_Assembler::membar_storeload() {
4242   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
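  // Under x86's TSO model only stores can pass later loads, so StoreLoad is
  // the one ordering that needs a real fence; membar() typically emits a
  // locked RMW on the stack (or mfence) for it.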
4243 }
4244 
4245 void LIR_Assembler::on_spin_wait() {
4246   __ pause();
4247 }
4248 
4249 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4250   assert(result_reg->is_register(), "check");
4251 #ifdef _LP64
4252   // __ get_thread(result_reg->as_register_lo());
4253   __ mov(result_reg->as_register(), r15_thread);
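  // r15 is permanently reserved for the current JavaThread on x86_64, so this
  // is a plain register move; 32-bit x86 falls back to the TLS lookup below.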
4254 #else
4255   __ get_thread(result_reg->as_register());
4256 #endif // _LP64
4257 }
4258 
4259 void LIR_Assembler::check_orig_pc() {
4260   __ cmpptr(frame_map()->address_for_orig_pc_addr(), NULL_WORD);
4261 }
4262 
4263 void LIR_Assembler::peephole(LIR_List*) {
4264   // do nothing for now
4265 }
4266 
4267 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4268   assert(data == dest, "xchg/xadd uses only 2 operands");
4269 
4270   if (data->type() == T_INT) {
4271     if (code == lir_xadd) {
4272       __ lock();
4273       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4274     } else {
4275       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
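      // No lock prefix is needed here: xchg with a memory operand is
      // implicitly locked on x86 (unlike xadd above).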
4276     }
4277   } else if (data->is_oop()) {
4278     assert (code == lir_xchg, "xadd for oops");
4279     Register obj = data->as_register();
4280 #ifdef _LP64
4281     if (UseCompressedOops) {