
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"

  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 
  48 
  49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  50 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  51 // fast versions of NegF/NegD and AbsF/AbsD.
  52 
  53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  55   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  56   // for the 128-bit operands of SSE instructions.
  57   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  58   // Store the value to a 128-bit operand.
  59   operand[0] = lo;

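For context: NegF/NegD and AbsF/AbsD compile down to XMM xor/and operations against the 16-byte-aligned masks built with double_quadword above. A minimal host-side sketch of the same bit trick (plain C++, not VM code):

#include <cstdint>
#include <cstring>

// Negation flips the IEEE-754 sign bit; absolute value clears it.
static float neg_f(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));  // safe type-punning
  bits ^= 0x80000000u;                   // flip the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

static float abs_f(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= 0x7fffffffu;                   // clear the sign bit
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}
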
 178 
 179 void LIR_Assembler::ffree(int i) {
 180   __ ffree(i);
 181 }
 182 #endif // !_LP64
 183 
 184 void LIR_Assembler::breakpoint() {
 185   __ int3();
 186 }
 187 
 188 void LIR_Assembler::push(LIR_Opr opr) {
 189   if (opr->is_single_cpu()) {
 190     __ push_reg(opr->as_register());
 191   } else if (opr->is_double_cpu()) {
 192     NOT_LP64(__ push_reg(opr->as_register_hi()));
 193     __ push_reg(opr->as_register_lo());
 194   } else if (opr->is_stack()) {
 195     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 196   } else if (opr->is_constant()) {
 197     LIR_Const* const_opr = opr->as_constant_ptr();
 198     if (const_opr->type() == T_OBJECT) {
 199       __ push_oop(const_opr->as_jobject());
 200     } else if (const_opr->type() == T_INT) {
 201       __ push_jint(const_opr->as_jint());
 202     } else {
 203       ShouldNotReachHere();
 204     }
 205 
 206   } else {
 207     ShouldNotReachHere();
 208   }
 209 }
 210 
 211 void LIR_Assembler::pop(LIR_Opr opr) {
 212   if (opr->is_single_cpu()) {
 213     __ pop_reg(opr->as_register());
 214   } else {
 215     ShouldNotReachHere();
 216   }
 217 }
 218 

 469     __ bind(*stub->continuation());
 470   }
 471 
 472   if (compilation()->env()->dtrace_method_probes()) {
 473 #ifdef _LP64
 474     __ mov(rdi, r15_thread);
 475     __ mov_metadata(rsi, method()->constant_encoding());
 476 #else
 477     __ get_thread(rax);
 478     __ movptr(Address(rsp, 0), rax);
 479     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 480 #endif
 481     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 482   }
 483 
 484   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 485     __ mov(rax, rbx);  // Restore the exception
 486   }
 487 
 488   // remove the activation and dispatch to the unwind handler
 489   __ remove_frame(initial_frame_size_in_bytes());
 490   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 491 
 492   // Emit the slow path assembly
 493   if (stub != NULL) {
 494     stub->emit_code(this);
 495   }
 496 
 497   return offset;
 498 }
 499 
 500 
 501 int LIR_Assembler::emit_deopt_handler() {
 502   // if the last instruction is a call (typically to do a throw which
 503   // is coming at the end after block reordering) the return address
 504   // must still point into the code area in order to avoid assertion
 505   // failures when searching for the corresponding bci => add a nop
 506   // (was bug 5/14/1999 - gri)
 507   __ nop();
 508 
 509   // generate code for deopt handler

 514     return -1;
 515   }
 516 
 517   int offset = code_offset();
 518   InternalAddress here(__ pc());
 519 
 520   __ pushptr(here.addr());
 521   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 522   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 523   __ end_a_stub();
 524 
 525   return offset;
 526 }
 527 
 528 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 529   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 530   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 531     assert(result->fpu() == 0, "result must already be on TOS");
 532   }
 533 

 534   // Pop the stack before the safepoint code
 535   __ remove_frame(initial_frame_size_in_bytes());
 536 
 537   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 538     __ reserved_stack_check();
 539   }
 540 
 541   // Note: we do not need to round the double result; the float result has the right precision.
 542   // The poll sets the condition code, but no data registers.
 543 
 544 #ifdef _LP64
 545   const Register thread = r15_thread;
 546 #else
 547   const Register thread = rbx;
 548   __ get_thread(thread);
 549 #endif
 550   code_stub->set_safepoint_offset(__ offset());
 551   __ relocate(relocInfo::poll_return_type);
 552   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 553   __ ret(0);
 554 }
 555 
 556 
 557 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 558   guarantee(info != NULL, "Shouldn't be NULL");
 559   int offset = __ offset();
 560 #ifdef _LP64
 561   const Register poll_addr = rscratch1;
 562   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 563 #else
 564   assert(tmp->is_cpu_register(), "needed");
 565   const Register poll_addr = tmp->as_register();
 566   __ get_thread(poll_addr);
 567   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 568 #endif
 569   add_debug_info_for_branch(info);
 570   __ relocate(relocInfo::poll_type);
 571   address pre_pc = __ pc();
 572   __ testl(rax, Address(poll_addr, 0));
 573   address post_pc = __ pc();
 574   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 575   return offset;
 576 }
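
Why the "2 LP64_ONLY(+1)" length guarantee: the poll is a single load whose exact length the runtime relies on to relate a faulting pc on the armed polling page back to this poll site and its debug info. A worked sketch of the encodings being checked (the 64-bit case assumes poll_addr = rscratch1 = r11; the 32-bit register varies):

  // 32-bit:  85 06      test eax, [esi]   ; opcode + ModRM        -> 2 bytes
  // 64-bit:  41 85 03   test eax, [r11]   ; REX.B + opcode + ModRM -> 3 bytes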

 597       break;
 598     }
 599 
 600     case T_ADDRESS: {
 601       assert(patch_code == lir_patch_none, "no patching handled here");
 602       __ movptr(dest->as_register(), c->as_jint());
 603       break;
 604     }
 605 
 606     case T_LONG: {
 607       assert(patch_code == lir_patch_none, "no patching handled here");
 608 #ifdef _LP64
 609       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 610 #else
 611       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 612       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 613 #endif // _LP64
 614       break;
 615     }
 616 

 617     case T_OBJECT: {
 618       if (patch_code != lir_patch_none) {
 619         jobject2reg_with_patching(dest->as_register(), info);
 620       } else {
 621         __ movoop(dest->as_register(), c->as_jobject());
 622       }
 623       break;
 624     }
 625 
 626     case T_METADATA: {
 627       if (patch_code != lir_patch_none) {
 628         klass2reg_with_patching(dest->as_register(), info);
 629       } else {
 630         __ mov_metadata(dest->as_register(), c->as_metadata());
 631       }
 632       break;
 633     }
 634 
 635     case T_FLOAT: {
 636       if (dest->is_single_xmm()) {

 687     default:
 688       ShouldNotReachHere();
 689   }
 690 }
 691 
 692 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 693   assert(src->is_constant(), "should not call otherwise");
 694   assert(dest->is_stack(), "should not call otherwise");
 695   LIR_Const* c = src->as_constant_ptr();
 696 
 697   switch (c->type()) {
 698     case T_INT:  // fall through
 699     case T_FLOAT:
 700       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 701       break;
 702 
 703     case T_ADDRESS:
 704       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 705       break;
 706 

 707     case T_OBJECT:
 708       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 709       break;
 710 
 711     case T_LONG:  // fall through
 712     case T_DOUBLE:
 713 #ifdef _LP64
 714       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 715                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 716 #else
 717       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 718                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 719       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 720                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 721 #endif // _LP64
 722       break;
 723 
 724     default:
 725       ShouldNotReachHere();
 726   }
 727 }
 728 
 729 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 730   assert(src->is_constant(), "should not call otherwise");
 731   assert(dest->is_address(), "should not call otherwise");
 732   LIR_Const* c = src->as_constant_ptr();
 733   LIR_Address* addr = dest->as_address_ptr();
 734 
 735   int null_check_here = code_offset();
 736   switch (type) {
 737     case T_INT:    // fall through
 738     case T_FLOAT:
 739       __ movl(as_Address(addr), c->as_jint_bits());
 740       break;
 741 
 742     case T_ADDRESS:
 743       __ movptr(as_Address(addr), c->as_jint_bits());
 744       break;
 745 

 746     case T_OBJECT:  // fall through
 747     case T_ARRAY:
 748       if (c->as_jobject() == NULL) {
 749         if (UseCompressedOops && !wide) {
 750           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 751         } else {
 752 #ifdef _LP64
 753           __ xorptr(rscratch1, rscratch1);
 754           null_check_here = code_offset();
 755           __ movptr(as_Address(addr), rscratch1);
 756 #else
 757           __ movptr(as_Address(addr), NULL_WORD);
 758 #endif
 759         }
 760       } else {
 761         if (is_literal_address(addr)) {
 762           ShouldNotReachHere();
 763           __ movoop(as_Address(addr, noreg), c->as_jobject());
 764         } else {
 765 #ifdef _LP64

 814   if (info != NULL) {
 815     add_debug_info_for_null_check(null_check_here, info);
 816   }
 817 }
 818 
 819 
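In const2mem above, a 32-bit store of zero suffices for a null oop when UseCompressedOops && !wide because a narrow oop encodes null as 0 for any heap base and shift. A hedged sketch of the narrowing (base/shift stand in for the VM's CompressedOops parameters):

#include <cstdint>

static uint32_t encode_narrow_oop(uintptr_t oop, uintptr_t heap_base, int shift) {
  if (oop == 0) return 0;                        // null always encodes as 0
  return (uint32_t)((oop - heap_base) >> shift); // compress to 32 bits
}
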
 820 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 821   assert(src->is_register(), "should not call otherwise");
 822   assert(dest->is_register(), "should not call otherwise");
 823 
 824   // move between cpu-registers
 825   if (dest->is_single_cpu()) {
 826 #ifdef _LP64
 827     if (src->type() == T_LONG) {
 828       // Can do LONG -> OBJECT
 829       move_regs(src->as_register_lo(), dest->as_register());
 830       return;
 831     }
 832 #endif
 833     assert(src->is_single_cpu(), "must match");
 834     if (src->type() == T_OBJECT) {
 835       __ verify_oop(src->as_register());
 836     }
 837     move_regs(src->as_register(), dest->as_register());
 838 
 839   } else if (dest->is_double_cpu()) {
 840 #ifdef _LP64
 841     if (is_reference_type(src->type())) {
 842       // Surprisingly, we can see a move of a long to T_OBJECT here.
 843       __ verify_oop(src->as_register());
 844       move_regs(src->as_register(), dest->as_register_lo());
 845       return;
 846     }
 847 #endif
 848     assert(src->is_double_cpu(), "must match");
 849     Register f_lo = src->as_register_lo();
 850     Register f_hi = src->as_register_hi();
 851     Register t_lo = dest->as_register_lo();
 852     Register t_hi = dest->as_register_hi();
 853 #ifdef _LP64
 854     assert(f_hi == f_lo, "must be same");

1000       break;
1001     }
1002 
1003     case T_DOUBLE: {
1004 #ifdef _LP64
1005       assert(src->is_double_xmm(), "not a double");
1006       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1007 #else
1008       if (src->is_double_xmm()) {
1009         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1010       } else {
1011         assert(src->is_double_fpu(), "must be");
1012         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1013         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1014         else                    __ fst_d (as_Address(to_addr));
1015       }
1016 #endif // _LP64
1017       break;
1018     }
1019 

1020     case T_ARRAY:   // fall through
1021     case T_OBJECT:  // fall through
1022       if (UseCompressedOops && !wide) {
1023         __ movl(as_Address(to_addr), compressed_src);
1024       } else {
1025         __ movptr(as_Address(to_addr), src->as_register());
1026       }
1027       break;
1028     case T_METADATA:
1029       // We get here to store a method pointer to the stack to pass to
1030       // a dtrace runtime call. This can't work on 64 bit with
1031       // compressed klass ptrs: T_METADATA can be a compressed klass
1032       // ptr or a 64 bit method pointer.
1033       LP64_ONLY(ShouldNotReachHere());
1034       __ movptr(as_Address(to_addr), src->as_register());
1035       break;
1036     case T_ADDRESS:
1037       __ movptr(as_Address(to_addr), src->as_register());
1038       break;
1039     case T_INT:

1172     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1173     // push and pop the part at src + wordSize, adding wordSize for the previous push
1174     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1175     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1176     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1177 #endif // _LP64
1178 
1179   } else {
1180     ShouldNotReachHere();
1181   }
1182 }
1183 
1184 
1185 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1186   assert(src->is_address(), "should not call otherwise");
1187   assert(dest->is_register(), "should not call otherwise");
1188 
1189   LIR_Address* addr = src->as_address_ptr();
1190   Address from_addr = as_Address(addr);
1191 
1192   if (addr->base()->type() == T_OBJECT) {
1193     __ verify_oop(addr->base()->as_pointer_register());
1194   }
1195 
1196   switch (type) {
1197     case T_BOOLEAN: // fall through
1198     case T_BYTE:    // fall through
1199     case T_CHAR:    // fall through
1200     case T_SHORT:
1201       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1202         // On pre-P6 processors we may get partial register stalls,
1203         // so blow away the value of to_rinfo before loading a
1204         // partial word into it.  Do it here so that it precedes
1205         // the potential patch point below.
1206         __ xorptr(dest->as_register(), dest->as_register());
1207       }
1208       break;
1209     default:
1210       break;
1211   }
1212 
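A sketch of the partial-register stall the xorptr above sidesteps (the classic pattern; exact behavior is microarchitecture-dependent):

  // mov  al, [mem]    ; writes only the low 8 bits of eax
  // cmp  eax, 1       ; reads the full register -> stale upper bits must be
  //                   ; merged with the new low byte, stalling the pipeline
  //
  // xor  eax, eax     ; full-width write breaks the dependence on old eax
  // mov  al, [mem]    ; the subsequent full-width read is now cheap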

1233 #endif // !LP64
1234       }
1235       break;
1236     }
1237 
1238     case T_DOUBLE: {
1239       if (dest->is_double_xmm()) {
1240         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1241       } else {
1242 #ifndef _LP64
1243         assert(dest->is_double_fpu(), "must be");
1244         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1245         __ fld_d(from_addr);
1246 #else
1247         ShouldNotReachHere();
1248 #endif // !LP64
1249       }
1250       break;
1251     }
1252 

1253     case T_OBJECT:  // fall through
1254     case T_ARRAY:   // fall through
1255       if (UseCompressedOops && !wide) {
1256         __ movl(dest->as_register(), from_addr);
1257       } else {
1258         __ movptr(dest->as_register(), from_addr);
1259       }
1260       break;
1261 
1262     case T_ADDRESS:
1263       __ movptr(dest->as_register(), from_addr);
1264       break;
1265     case T_INT:
1266       __ movl(dest->as_register(), from_addr);
1267       break;
1268 
1269     case T_LONG: {
1270       Register to_lo = dest->as_register_lo();
1271       Register to_hi = dest->as_register_hi();
1272 #ifdef _LP64

1609     add_debug_info_for_null_check_here(op->stub()->info());
1610     __ cmpb(Address(op->klass()->as_register(),
1611                     InstanceKlass::init_state_offset()),
1612                     InstanceKlass::fully_initialized);
1613     __ jcc(Assembler::notEqual, *op->stub()->entry());
1614   }
1615   __ allocate_object(op->obj()->as_register(),
1616                      op->tmp1()->as_register(),
1617                      op->tmp2()->as_register(),
1618                      op->header_size(),
1619                      op->object_size(),
1620                      op->klass()->as_register(),
1621                      *op->stub()->entry());
1622   __ bind(*op->stub()->continuation());
1623 }
1624 
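allocate_object above emits the usual TLAB bump-pointer fast path, branching to the stub entry when the TLAB cannot satisfy the request. A minimal host-side sketch of that fast path (assumes a thread-local [top, end) region; header and field initialization omitted):

#include <cstddef>

struct Tlab { char* top; char* end; };

static void* tlab_allocate(Tlab& t, size_t size_in_bytes) {
  char* obj = t.top;
  if (obj + size_in_bytes > t.end) return nullptr; // caller takes the slow path
  t.top = obj + size_in_bytes;                     // bump the allocation pointer
  return obj;
}
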
1625 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1626   Register len =  op->len()->as_register();
1627   LP64_ONLY( __ movslq(len, len); )
1628 
1629   if (UseSlowPath ||
1630       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1631       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1632     __ jmp(*op->stub()->entry());
1633   } else {
1634     Register tmp1 = op->tmp1()->as_register();
1635     Register tmp2 = op->tmp2()->as_register();
1636     Register tmp3 = op->tmp3()->as_register();
1637     if (len == tmp1) {
1638       tmp1 = tmp3;
1639     } else if (len == tmp2) {
1640       tmp2 = tmp3;
1641     } else if (len == tmp3) {
1642       // everything is ok
1643     } else {
1644       __ mov(tmp3, len);
1645     }
1646     __ allocate_array(op->obj()->as_register(),
1647                       len,
1648                       tmp1,
1649                       tmp2,

1708     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1709   }
1710   Label profile_cast_success, profile_cast_failure;
1711   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1712   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1713 
1714   if (obj == k_RInfo) {
1715     k_RInfo = dst;
1716   } else if (obj == klass_RInfo) {
1717     klass_RInfo = dst;
1718   }
1719   if (k->is_loaded() && !UseCompressedClassPointers) {
1720     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1721   } else {
1722     Rtmp1 = op->tmp3()->as_register();
1723     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1724   }
1725 
1726   assert_different_registers(obj, k_RInfo, klass_RInfo);
1727 
1728   __ cmpptr(obj, (int32_t)NULL_WORD);
1729   if (op->should_profile()) {
1730     Label not_null;
1731     __ jccb(Assembler::notEqual, not_null);
1732     // Object is null; update MDO and exit
1733     Register mdo  = klass_RInfo;
1734     __ mov_metadata(mdo, md->constant_encoding());
1735     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1736     int header_bits = BitData::null_seen_byte_constant();
1737     __ orb(data_addr, header_bits);
1738     __ jmp(*obj_is_null);
1739     __ bind(not_null);
1740   } else {
1741     __ jcc(Assembler::equal, *obj_is_null);

1742   }
1743 
1744   if (!k->is_loaded()) {
1745     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1746   } else {
1747 #ifdef _LP64
1748     __ mov_metadata(k_RInfo, k->constant_encoding());
1749 #endif // _LP64
1750   }
1751   __ verify_oop(obj);
1752 
1753   if (op->fast_check()) {
1754     // get object class
1755     // not a safepoint as obj null check happens earlier
1756 #ifdef _LP64
1757     if (UseCompressedClassPointers) {
1758       __ load_klass(Rtmp1, obj, tmp_load_klass);
1759       __ cmpptr(k_RInfo, Rtmp1);
1760     } else {
1761       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1933         __ mov(dst, obj);
1934       }
1935     } else
1936       if (code == lir_instanceof) {
1937         Register obj = op->object()->as_register();
1938         Register dst = op->result_opr()->as_register();
1939         Label success, failure, done;
1940         emit_typecheck_helper(op, &success, &failure, &failure);
1941         __ bind(failure);
1942         __ xorptr(dst, dst);
1943         __ jmpb(done);
1944         __ bind(success);
1945         __ movptr(dst, 1);
1946         __ bind(done);
1947       } else {
1948         ShouldNotReachHere();
1949       }
1950 
1951 }
1952 
1953 
1954 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1955   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1956     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1957     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1958     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1959     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1960     Register addr = op->addr()->as_register();
1961     __ lock();
1962     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1963 
1964   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1965     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1966     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1967     Register newval = op->new_value()->as_register();
1968     Register cmpval = op->cmp_value()->as_register();
1969     assert(cmpval == rax, "wrong register");
1970     assert(newval != NULL, "new val must be register");
1971     assert(cmpval != newval, "cmp and new values must be in different registers");
1972     assert(cmpval != addr, "cmp and addr must be in different registers");

1993       __ cmpxchgl(newval, Address(addr, 0));
1994     }
1995 #ifdef _LP64
1996   } else if (op->code() == lir_cas_long) {
1997     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1998     Register newval = op->new_value()->as_register_lo();
1999     Register cmpval = op->cmp_value()->as_register_lo();
2000     assert(cmpval == rax, "wrong register");
2001     assert(newval != NULL, "new val must be register");
2002     assert(cmpval != newval, "cmp and new values must be in different registers");
2003     assert(cmpval != addr, "cmp and addr must be in different registers");
2004     assert(newval != addr, "new value and addr must be in different registers");
2005     __ lock();
2006     __ cmpxchgq(newval, Address(addr, 0));
2007 #endif // _LP64
2008   } else {
2009     Unimplemented();
2010   }
2011 }
2012 
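Every branch of emit_compare_and_swap funnels into lock cmpxchg{8b,l,q}, whose contract the asserts encode: rax (edx:eax for cmpxchg8b) carries the expected value in and the observed value out, and ZF reports success. A host-side analogue:

#include <atomic>
#include <cstdint>

// `expected` plays the role of rax: on failure it is overwritten with the
// value actually observed in memory, exactly as cmpxchg leaves it in rax.
static bool cas64(std::atomic<int64_t>& mem, int64_t& expected, int64_t newval) {
  return mem.compare_exchange_strong(expected, newval);
}
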
2013 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2014   Assembler::Condition acond, ncond;
2015   switch (condition) {
2016     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2017     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2018     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2019     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2020     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2021     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2022     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2023     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2024     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2025                                 ShouldNotReachHere();
2026   }
2027 
2028   if (opr1->is_cpu_register()) {
2029     reg2reg(opr1, result);
2030   } else if (opr1->is_stack()) {
2031     stack2reg(opr1, result, result->type());
2032   } else if (opr1->is_constant()) {

2857   int offset = __ offset();
2858   switch (code) {
2859   case lir_static_call:
2860   case lir_optvirtual_call:
2861   case lir_dynamic_call:
2862     offset += NativeCall::displacement_offset;
2863     break;
2864   case lir_icvirtual_call:
2865     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2866     break;
2867   default: ShouldNotReachHere();
2868   }
2869   __ align(BytesPerWord, offset);
2870 }
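
align_call pads the instruction stream so that the 4-byte displacement of the upcoming call starts on a word boundary, letting later code patching rewrite it with a single naturally-atomic store. A sketch of the padding arithmetic, assuming BytesPerWord == 8:

static int call_alignment_padding(int pc_offset, int disp_offset, int word = 8) {
  int rem = (pc_offset + disp_offset) % word;  // where the displacement would land
  return rem == 0 ? 0 : word - rem;            // nop bytes to emit first
}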
2871 
2872 
2873 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2874   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2875          "must be aligned");
2876   __ call(AddressLiteral(op->addr(), rtype));
2877   add_call_info(code_offset(), op->info());
2878 }
2879 
2880 
2881 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2882   __ ic_call(op->addr());
2883   add_call_info(code_offset(), op->info());
2884   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2885          "must be aligned");
2886 }
2887 
2888 
2889 void LIR_Assembler::emit_static_call_stub() {
2890   address call_pc = __ pc();
2891   address stub = __ start_a_stub(call_stub_size());
2892   if (stub == NULL) {
2893     bailout("static call stub overflow");
2894     return;
2895   }
2896 
2897   int start = __ offset();
2898 
2899   // make sure that the displacement word of the call ends up word aligned
2900   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2901   __ relocate(static_stub_Relocation::spec(call_pc));
2902   __ mov_metadata(rbx, (Metadata*)NULL);
2903   // must be set to -1 at code generation time

3043   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3044 }
3045 
3046 
3047 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3048   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3049   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3050   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3051   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3052 }
3053 
3054 
3055 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3056   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3057   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3058   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3059   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3060 }
3061 
3062 
3063 // This code replaces a call to arraycopy; no exceptions may be thrown
3064 // in this code, they must be thrown in the System.arraycopy activation
3065 // frame instead.  We could save some checks if this were not the case.
3066 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3067   ciArrayKlass* default_type = op->expected_type();
3068   Register src = op->src()->as_register();
3069   Register dst = op->dst()->as_register();
3070   Register src_pos = op->src_pos()->as_register();
3071   Register dst_pos = op->dst_pos()->as_register();
3072   Register length  = op->length()->as_register();
3073   Register tmp = op->tmp()->as_register();
3074   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3075 
3076   CodeStub* stub = op->stub();
3077   int flags = op->flags();
3078   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3079   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3080 
3081   // if we don't know anything, just go through the generic arraycopy
3082   if (default_type == NULL) {
3083     // save outgoing arguments on stack in case call to System.arraycopy is needed
3084     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3085     // for interpreter calling conventions. Now we have to do it in new style conventions.
3086     // For the moment until C1 gets the new register allocator I just force all the
3087     // args to the right place (except the register args) and then on the back side
3088     // reload the register args properly if we go slow path. Yuck
3089 
3090     // These are proper for the calling convention
3091     store_parameter(length, 2);
3092     store_parameter(dst_pos, 1);
3093     store_parameter(dst, 0);
3094 
3095     // these are just temporary placements until we need to reload
3096     store_parameter(src_pos, 3);
3097     store_parameter(src, 4);
3098     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3099 
3100     address copyfunc_addr = StubRoutines::generic_arraycopy();

3154     __ mov(tmp, rax);
3155     __ xorl(tmp, -1);
3156 
3157     // Reload values from the stack so they are where the stub
3158     // expects them.
3159     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3160     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3161     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3162     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3163     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3164 
3165     __ subl(length, tmp);
3166     __ addl(src_pos, tmp);
3167     __ addl(dst_pos, tmp);
3168     __ jmp(*stub->entry());
3169 
3170     __ bind(*stub->continuation());
3171     return;
3172   }
3173 
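The `xorl(tmp, -1)` above decodes the stub's result. Inferred from this adjustment code (a reading of the emitted arithmetic, not a quoted spec): generic_arraycopy returns 0 on complete success and ~n after copying only n elements, so the slow path resumes at the uncopied tail:

static void after_generic_arraycopy(int rax_result, int& src_pos,
                                    int& dst_pos, int& length) {
  if (rax_result == 0) return;   // everything was copied
  int copied = ~rax_result;      // the emitted `xorl tmp, -1`
  src_pos += copied;             // skip the elements already moved
  dst_pos += copied;
  length  -= copied;
  // ...then fall into the System.arraycopy slow-path stub
}
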
3174   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3175 
3176   int elem_size = type2aelembytes(basic_type);
3177   Address::ScaleFactor scale;
3178 
3179   switch (elem_size) {
3180     case 1 :
3181       scale = Address::times_1;
3182       break;
3183     case 2 :
3184       scale = Address::times_2;
3185       break;
3186     case 4 :
3187       scale = Address::times_4;
3188       break;
3189     case 8 :
3190       scale = Address::times_8;
3191       break;
3192     default:
3193       scale = Address::no_scale;

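The scale switch above is exact_log2 of the element size written out; an equivalent computation:

static int scale_for(int elem_size) {
  int s = 0;
  while ((1 << s) < elem_size) s++;  // elem_size is 1, 2, 4 or 8 here
  return s;                          // 0..3 == times_1..times_8
}
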
3750         __ jccb(Assembler::zero, next);
3751 #endif
3752         // first time here. Set profile type.
3753         __ movptr(mdo_addr, tmp);
3754       } else {
3755         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3756                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3757 
3758         __ movptr(tmp, mdo_addr);
3759         __ testptr(tmp, TypeEntries::type_unknown);
3760         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3761 
3762         __ orptr(mdo_addr, TypeEntries::type_unknown);
3763       }
3764     }
3765 
3766     __ bind(next);
3767   }
3768 }
3769 
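For context, the profiling code above maintains one type cell per MDO slot: a Klass* with flag bits in its low bits. A hedged sketch of the update protocol (the flag values mirror TypeEntries and are assumptions here):

#include <cstdint>

static const uintptr_t kTypeUnknown = 2;             // assumed: TypeEntries::type_unknown
static const uintptr_t kTypeMask    = ~(uintptr_t)3; // assumed: low bits are flags

static void record_type(uintptr_t& cell, uintptr_t klass) {
  if (cell & kTypeUnknown) return;                        // already polymorphic
  if ((cell & kTypeMask) == 0) { cell |= klass; return; } // first type seen
  if ((cell & kTypeMask) != klass) cell |= kTypeUnknown;  // conflict: latch unknown
}
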
3770 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3771   Unimplemented();
3772 }
3773 
3774 
3775 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3776   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3777 }
3778 
3779 
3780 void LIR_Assembler::align_backward_branch_target() {
3781   __ align(BytesPerWord);
3782 }
3783 
3784 
3785 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3786   if (left->is_single_cpu()) {
3787     __ negl(left->as_register());
3788     move_regs(left->as_register(), dest->as_register());
3789 

4010 }
4011 
4012 void LIR_Assembler::membar_storeload() {
4013   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4014 }
4015 
4016 void LIR_Assembler::on_spin_wait() {
4017   __ pause ();
4018 }
4019 
4020 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4021   assert(result_reg->is_register(), "check");
4022 #ifdef _LP64
4023   // __ get_thread(result_reg->as_register_lo());
4024   __ mov(result_reg->as_register(), r15_thread);
4025 #else
4026   __ get_thread(result_reg->as_register());
4027 #endif // _LP64
4028 }
4029 
4030 
4031 void LIR_Assembler::peephole(LIR_List*) {
4032   // do nothing for now
4033 }
4034 
4035 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4036   assert(data == dest, "xchg/xadd uses only 2 operands");
4037 
4038   if (data->type() == T_INT) {
4039     if (code == lir_xadd) {
4040       __ lock();
4041       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4042     } else {
4043       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4044     }
4045   } else if (data->is_oop()) {
4046     assert (code == lir_xchg, "xadd for oops");
4047     Register obj = data->as_register();
4048 #ifdef _LP64
4049     if (UseCompressedOops) {

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  56 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  58   // for the 128-bit operands of SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value to a 128-bit operand.
  61   operand[0] = lo;

 180 
 181 void LIR_Assembler::ffree(int i) {
 182   __ ffree(i);
 183 }
 184 #endif // !_LP64
 185 
 186 void LIR_Assembler::breakpoint() {
 187   __ int3();
 188 }
 189 
 190 void LIR_Assembler::push(LIR_Opr opr) {
 191   if (opr->is_single_cpu()) {
 192     __ push_reg(opr->as_register());
 193   } else if (opr->is_double_cpu()) {
 194     NOT_LP64(__ push_reg(opr->as_register_hi()));
 195     __ push_reg(opr->as_register_lo());
 196   } else if (opr->is_stack()) {
 197     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 198   } else if (opr->is_constant()) {
 199     LIR_Const* const_opr = opr->as_constant_ptr();
 200     if (const_opr->type() == T_OBJECT || const_opr->type() == T_PRIMITIVE_OBJECT) {
 201       __ push_oop(const_opr->as_jobject());
 202     } else if (const_opr->type() == T_INT) {
 203       __ push_jint(const_opr->as_jint());
 204     } else {
 205       ShouldNotReachHere();
 206     }
 207 
 208   } else {
 209     ShouldNotReachHere();
 210   }
 211 }
 212 
 213 void LIR_Assembler::pop(LIR_Opr opr) {
 214   if (opr->is_single_cpu()) {
 215     __ pop_reg(opr->as_register());
 216   } else {
 217     ShouldNotReachHere();
 218   }
 219 }
 220 

 471     __ bind(*stub->continuation());
 472   }
 473 
 474   if (compilation()->env()->dtrace_method_probes()) {
 475 #ifdef _LP64
 476     __ mov(rdi, r15_thread);
 477     __ mov_metadata(rsi, method()->constant_encoding());
 478 #else
 479     __ get_thread(rax);
 480     __ movptr(Address(rsp, 0), rax);
 481     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 482 #endif
 483     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 484   }
 485 
 486   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 487     __ mov(rax, rbx);  // Restore the exception
 488   }
 489 
 490   // remove the activation and dispatch to the unwind handler
 491   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 492   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 493 
 494   // Emit the slow path assembly
 495   if (stub != NULL) {
 496     stub->emit_code(this);
 497   }
 498 
 499   return offset;
 500 }
 501 
 502 
 503 int LIR_Assembler::emit_deopt_handler() {
 504   // if the last instruction is a call (typically to do a throw which
 505   // is coming at the end after block reordering) the return address
 506   // must still point into the code area in order to avoid assertion
 507   // failures when searching for the corresponding bci => add a nop
 508   // (was bug 5/14/1999 - gri)
 509   __ nop();
 510 
 511   // generate code for deopt handler

 516     return -1;
 517   }
 518 
 519   int offset = code_offset();
 520   InternalAddress here(__ pc());
 521 
 522   __ pushptr(here.addr());
 523   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 524   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 525   __ end_a_stub();
 526 
 527   return offset;
 528 }
 529 
 530 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 531   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 532   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 533     assert(result->fpu() == 0, "result must already be on TOS");
 534   }
 535 
 536   ciMethod* method = compilation()->method();
 537   if (InlineTypeReturnedAsFields && method->return_type()->is_inlinetype()) {
 538     ciInlineKlass* vk = method->return_type()->as_inline_klass();
 539     if (vk->can_be_returned_as_fields()) {
 540 #ifndef _LP64
 541       Unimplemented();
 542 #else
 543       address unpack_handler = vk->unpack_handler();
 544       assert(unpack_handler != NULL, "must be");
 545       __ call(RuntimeAddress(unpack_handler));
 546       // At this point, rax points to the value object (for interpreter or C1 caller).
 547       // The fields of the object are copied into registers (for C2 caller).
 548 #endif
 549     }
 550   }
 551 
 552   // Pop the stack before the safepoint code
 553   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 554 
 555   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 556     __ reserved_stack_check();
 557   }
 558 
 559   // Note: we do not need to round the double result; the float result has the right precision.
 560   // The poll sets the condition code, but no data registers.
 561 
 562 #ifdef _LP64
 563   const Register thread = r15_thread;
 564 #else
 565   const Register thread = rbx;
 566   __ get_thread(thread);
 567 #endif
 568   code_stub->set_safepoint_offset(__ offset());
 569   __ relocate(relocInfo::poll_return_type);
 570   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 571   __ ret(0);
 572 }
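
The unpack_handler call implements the dual return convention for inline types; summarizing the comments above (a sketch, not a spec):

  //   caller kind          what it sees after return
  //   ------------------   ------------------------------------------
  //   interpreter / C1     rax = oop of the buffered value object
  //   C2 (scalarized)      the fields spread across the return
  //                        registers chosen by the inline klass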
 573 
 574 
 575 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 576   return (__ store_inline_type_fields_to_buf(vk, false));
 577 }
 578 
 579 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 580   guarantee(info != NULL, "Shouldn't be NULL");
 581   int offset = __ offset();
 582 #ifdef _LP64
 583   const Register poll_addr = rscratch1;
 584   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 585 #else
 586   assert(tmp->is_cpu_register(), "needed");
 587   const Register poll_addr = tmp->as_register();
 588   __ get_thread(poll_addr);
 589   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 590 #endif
 591   add_debug_info_for_branch(info);
 592   __ relocate(relocInfo::poll_type);
 593   address pre_pc = __ pc();
 594   __ testl(rax, Address(poll_addr, 0));
 595   address post_pc = __ pc();
 596   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 597   return offset;
 598 }

 619       break;
 620     }
 621 
 622     case T_ADDRESS: {
 623       assert(patch_code == lir_patch_none, "no patching handled here");
 624       __ movptr(dest->as_register(), c->as_jint());
 625       break;
 626     }
 627 
 628     case T_LONG: {
 629       assert(patch_code == lir_patch_none, "no patching handled here");
 630 #ifdef _LP64
 631       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 632 #else
 633       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 634       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 635 #endif // _LP64
 636       break;
 637     }
 638 
 639     case T_PRIMITIVE_OBJECT: // Fall through
 640     case T_OBJECT: {
 641       if (patch_code != lir_patch_none) {
 642         jobject2reg_with_patching(dest->as_register(), info);
 643       } else {
 644         __ movoop(dest->as_register(), c->as_jobject());
 645       }
 646       break;
 647     }
 648 
 649     case T_METADATA: {
 650       if (patch_code != lir_patch_none) {
 651         klass2reg_with_patching(dest->as_register(), info);
 652       } else {
 653         __ mov_metadata(dest->as_register(), c->as_metadata());
 654       }
 655       break;
 656     }
 657 
 658     case T_FLOAT: {
 659       if (dest->is_single_xmm()) {

 710     default:
 711       ShouldNotReachHere();
 712   }
 713 }
 714 
 715 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 716   assert(src->is_constant(), "should not call otherwise");
 717   assert(dest->is_stack(), "should not call otherwise");
 718   LIR_Const* c = src->as_constant_ptr();
 719 
 720   switch (c->type()) {
 721     case T_INT:  // fall through
 722     case T_FLOAT:
 723       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 724       break;
 725 
 726     case T_ADDRESS:
 727       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 728       break;
 729 
 730     case T_PRIMITIVE_OBJECT: // Fall through
 731     case T_OBJECT:
 732       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 733       break;
 734 
 735     case T_LONG:  // fall through
 736     case T_DOUBLE:
 737 #ifdef _LP64
 738       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 739                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 740 #else
 741       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 742                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 743       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 744                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 745 #endif // _LP64
 746       break;
 747 
 748     default:
 749       ShouldNotReachHere();
 750   }
 751 }
 752 
 753 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 754   assert(src->is_constant(), "should not call otherwise");
 755   assert(dest->is_address(), "should not call otherwise");
 756   LIR_Const* c = src->as_constant_ptr();
 757   LIR_Address* addr = dest->as_address_ptr();
 758 
 759   int null_check_here = code_offset();
 760   switch (type) {
 761     case T_INT:    // fall through
 762     case T_FLOAT:
 763       __ movl(as_Address(addr), c->as_jint_bits());
 764       break;
 765 
 766     case T_ADDRESS:
 767       __ movptr(as_Address(addr), c->as_jint_bits());
 768       break;
 769 
 770     case T_PRIMITIVE_OBJECT: // fall through
 771     case T_OBJECT:  // fall through
 772     case T_ARRAY:
 773       if (c->as_jobject() == NULL) {
 774         if (UseCompressedOops && !wide) {
 775           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 776         } else {
 777 #ifdef _LP64
 778           __ xorptr(rscratch1, rscratch1);
 779           null_check_here = code_offset();
 780           __ movptr(as_Address(addr), rscratch1);
 781 #else
 782           __ movptr(as_Address(addr), NULL_WORD);
 783 #endif
 784         }
 785       } else {
 786         if (is_literal_address(addr)) {
 787           ShouldNotReachHere();
 788           __ movoop(as_Address(addr, noreg), c->as_jobject());
 789         } else {
 790 #ifdef _LP64

 839   if (info != NULL) {
 840     add_debug_info_for_null_check(null_check_here, info);
 841   }
 842 }
 843 
 844 
 845 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 846   assert(src->is_register(), "should not call otherwise");
 847   assert(dest->is_register(), "should not call otherwise");
 848 
 849   // move between cpu-registers
 850   if (dest->is_single_cpu()) {
 851 #ifdef _LP64
 852     if (src->type() == T_LONG) {
 853       // Can do LONG -> OBJECT
 854       move_regs(src->as_register_lo(), dest->as_register());
 855       return;
 856     }
 857 #endif
 858     assert(src->is_single_cpu(), "must match");
 859     if (src->type() == T_OBJECT || src->type() == T_PRIMITIVE_OBJECT) {
 860       __ verify_oop(src->as_register());
 861     }
 862     move_regs(src->as_register(), dest->as_register());
 863 
 864   } else if (dest->is_double_cpu()) {
 865 #ifdef _LP64
 866     if (is_reference_type(src->type())) {
 867       // Surprisingly, we can see a move of a long to T_OBJECT here.
 868       __ verify_oop(src->as_register());
 869       move_regs(src->as_register(), dest->as_register_lo());
 870       return;
 871     }
 872 #endif
 873     assert(src->is_double_cpu(), "must match");
 874     Register f_lo = src->as_register_lo();
 875     Register f_hi = src->as_register_hi();
 876     Register t_lo = dest->as_register_lo();
 877     Register t_hi = dest->as_register_hi();
 878 #ifdef _LP64
 879     assert(f_hi == f_lo, "must be same");

1025       break;
1026     }
1027 
1028     case T_DOUBLE: {
1029 #ifdef _LP64
1030       assert(src->is_double_xmm(), "not a double");
1031       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1032 #else
1033       if (src->is_double_xmm()) {
1034         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1035       } else {
1036         assert(src->is_double_fpu(), "must be");
1037         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1038         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1039         else                    __ fst_d (as_Address(to_addr));
1040       }
1041 #endif // _LP64
1042       break;
1043     }
1044 
1045     case T_PRIMITIVE_OBJECT: // fall through
1046     case T_ARRAY:   // fall through
1047     case T_OBJECT:  // fall through
1048       if (UseCompressedOops && !wide) {
1049         __ movl(as_Address(to_addr), compressed_src);
1050       } else {
1051         __ movptr(as_Address(to_addr), src->as_register());
1052       }
1053       break;
1054     case T_METADATA:
1055       // We get here to store a method pointer to the stack to pass to
1056       // a dtrace runtime call. This can't work on 64 bit with
1057       // compressed klass ptrs: T_METADATA can be a compressed klass
1058       // ptr or a 64 bit method pointer.
1059       LP64_ONLY(ShouldNotReachHere());
1060       __ movptr(as_Address(to_addr), src->as_register());
1061       break;
1062     case T_ADDRESS:
1063       __ movptr(as_Address(to_addr), src->as_register());
1064       break;
1065     case T_INT:

1198     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1199     // push and pop the part at src + wordSize, adding wordSize for the previous push
1200     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1201     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1202     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1203 #endif // _LP64
1204 
1205   } else {
1206     ShouldNotReachHere();
1207   }
1208 }
1209 
1210 
1211 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1212   assert(src->is_address(), "should not call otherwise");
1213   assert(dest->is_register(), "should not call otherwise");
1214 
1215   LIR_Address* addr = src->as_address_ptr();
1216   Address from_addr = as_Address(addr);
1217 
1218   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_PRIMITIVE_OBJECT) {
1219     __ verify_oop(addr->base()->as_pointer_register());
1220   }
1221 
1222   switch (type) {
1223     case T_BOOLEAN: // fall through
1224     case T_BYTE:    // fall through
1225     case T_CHAR:    // fall through
1226     case T_SHORT:
1227       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1228         // On pre-P6 processors we may get partial register stalls,
1229         // so blow away the value of to_rinfo before loading a
1230         // partial word into it.  Do it here so that it precedes
1231         // the potential patch point below.
1232         __ xorptr(dest->as_register(), dest->as_register());
1233       }
1234       break;
1235     default:
1236       break;
1237   }
1238 

1259 #endif // !LP64
1260       }
1261       break;
1262     }
1263 
1264     case T_DOUBLE: {
1265       if (dest->is_double_xmm()) {
1266         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1267       } else {
1268 #ifndef _LP64
1269         assert(dest->is_double_fpu(), "must be");
1270         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1271         __ fld_d(from_addr);
1272 #else
1273         ShouldNotReachHere();
1274 #endif // !LP64
1275       }
1276       break;
1277     }
1278 
1279     case T_PRIMITIVE_OBJECT: // fall through
1280     case T_OBJECT:  // fall through
1281     case T_ARRAY:   // fall through
1282       if (UseCompressedOops && !wide) {
1283         __ movl(dest->as_register(), from_addr);
1284       } else {
1285         __ movptr(dest->as_register(), from_addr);
1286       }
1287       break;
1288 
1289     case T_ADDRESS:
1290       __ movptr(dest->as_register(), from_addr);
1291       break;
1292     case T_INT:
1293       __ movl(dest->as_register(), from_addr);
1294       break;
1295 
1296     case T_LONG: {
1297       Register to_lo = dest->as_register_lo();
1298       Register to_hi = dest->as_register_hi();
1299 #ifdef _LP64

1636     add_debug_info_for_null_check_here(op->stub()->info());
1637     __ cmpb(Address(op->klass()->as_register(),
1638                     InstanceKlass::init_state_offset()),
1639                     InstanceKlass::fully_initialized);
1640     __ jcc(Assembler::notEqual, *op->stub()->entry());
1641   }
1642   __ allocate_object(op->obj()->as_register(),
1643                      op->tmp1()->as_register(),
1644                      op->tmp2()->as_register(),
1645                      op->header_size(),
1646                      op->object_size(),
1647                      op->klass()->as_register(),
1648                      *op->stub()->entry());
1649   __ bind(*op->stub()->continuation());
1650 }
1651 
1652 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1653   Register len =  op->len()->as_register();
1654   LP64_ONLY( __ movslq(len, len); )
1655 
1656   if (UseSlowPath || op->type() == T_PRIMITIVE_OBJECT ||
1657       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1658       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1659     __ jmp(*op->stub()->entry());
1660   } else {
1661     Register tmp1 = op->tmp1()->as_register();
1662     Register tmp2 = op->tmp2()->as_register();
1663     Register tmp3 = op->tmp3()->as_register();
1664     if (len == tmp1) {
1665       tmp1 = tmp3;
1666     } else if (len == tmp2) {
1667       tmp2 = tmp3;
1668     } else if (len == tmp3) {
1669       // everything is ok
1670     } else {
1671       __ mov(tmp3, len);
1672     }
1673     __ allocate_array(op->obj()->as_register(),
1674                       len,
1675                       tmp1,
1676                       tmp2,

1735     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1736   }
1737   Label profile_cast_success, profile_cast_failure;
1738   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1739   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1740 
1741   if (obj == k_RInfo) {
1742     k_RInfo = dst;
1743   } else if (obj == klass_RInfo) {
1744     klass_RInfo = dst;
1745   }
1746   if (k->is_loaded() && !UseCompressedClassPointers) {
1747     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1748   } else {
1749     Rtmp1 = op->tmp3()->as_register();
1750     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1751   }
1752 
1753   assert_different_registers(obj, k_RInfo, klass_RInfo);
1754 
1755   if (op->need_null_check()) {
1756     __ cmpptr(obj, (int32_t)NULL_WORD);
1757     if (op->should_profile()) {
1758       Label not_null;
1759       __ jccb(Assembler::notEqual, not_null);
1760       // Object is null; update MDO and exit
1761       Register mdo  = klass_RInfo;
1762       __ mov_metadata(mdo, md->constant_encoding());
1763       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1764       int header_bits = BitData::null_seen_byte_constant();
1765       __ orb(data_addr, header_bits);
1766       __ jmp(*obj_is_null);
1767       __ bind(not_null);
1768     } else {
1769       __ jcc(Assembler::equal, *obj_is_null);
1770     }
1771   }
1772 
1773   if (!k->is_loaded()) {
1774     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1775   } else {
1776 #ifdef _LP64
1777     __ mov_metadata(k_RInfo, k->constant_encoding());
1778 #endif // _LP64
1779   }
1780   __ verify_oop(obj);
1781 
1782   if (op->fast_check()) {
1783     // get object class
1784     // not a safepoint as obj null check happens earlier
1785 #ifdef _LP64
1786     if (UseCompressedClassPointers) {
1787       __ load_klass(Rtmp1, obj, tmp_load_klass);
1788       __ cmpptr(k_RInfo, Rtmp1);
1789     } else {
1790       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1962         __ mov(dst, obj);
1963       }
1964     } else
1965       if (code == lir_instanceof) {
1966         Register obj = op->object()->as_register();
1967         Register dst = op->result_opr()->as_register();
1968         Label success, failure, done;
1969         emit_typecheck_helper(op, &success, &failure, &failure);
1970         __ bind(failure);
1971         __ xorptr(dst, dst);
1972         __ jmpb(done);
1973         __ bind(success);
1974         __ movptr(dst, 1);
1975         __ bind(done);
1976       } else {
1977         ShouldNotReachHere();
1978       }
1979 
1980 }
1981 
1982 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1983   // We are loading/storing from/to an array that *may* be flattened (the
1984   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1985   // If this array is flattened, take the slow path.
1986   Register klass = op->tmp()->as_register();
1987   if (UseArrayMarkWordCheck) {
1988     __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1989   } else {
1990     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1991     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
1992     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
1993     __ testl(klass, Klass::_lh_array_tag_flat_value_bit_inplace);
1994     __ jcc(Assembler::notZero, *op->stub()->entry());
1995   }
1996   if (!op->value()->is_illegal()) {
1997     // The array is not flattened, but it might be null-free. If we are storing
1998     // a null into a null-free array, take the slow path (which will throw NPE).
1999     Label skip;
2000     __ cmpptr(op->value()->as_register(), (int32_t)NULL_WORD);
2001     __ jcc(Assembler::notEqual, skip);
2002     if (UseArrayMarkWordCheck) {
2003       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
2004     } else {
2005       __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
2006       __ jcc(Assembler::notZero, *op->stub()->entry());
2007     }
2008     __ bind(skip);
2009   }
2010 }
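
// Layout-helper sketch (illustrative, not part of this file): on the
// non-mark-word path, flatness and null-freeness are single bits in the
// array klass' layout helper, so the two checks above reduce to:
//
//   jint lh = array->klass()->layout_helper();
//   if (lh & Klass::_lh_array_tag_flat_value_bit_inplace) goto slow_path;
//   if (value == NULL && (lh & Klass::_lh_null_free_array_bit_inplace)) goto slow_path;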
2011 
2012 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
2013   // We are storing into an array that *may* be null-free (the declared type is
2014   // Object[], abstract[], interface[] or VT.ref[]).
2015   if (UseArrayMarkWordCheck) {
2016     Label test_mark_word;
2017     Register tmp = op->tmp()->as_register();
2018     __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
2019     __ testl(tmp, markWord::unlocked_value);
2020     __ jccb(Assembler::notZero, test_mark_word);
2021     __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
2022     __ bind(test_mark_word);
2023     __ testl(tmp, markWord::null_free_array_bit_in_place);
2024   } else {
2025     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
2026     Register klass = op->tmp()->as_register();
2027     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
2028     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
2029     __ testl(klass, Klass::_lh_null_free_array_bit_inplace);
2030   }
2031 }
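
// Mark-word sketch (illustrative): a locked object's header is displaced, so
// its property bits are not in place; in that case the bits are taken from
// the klass' prototype header instead. Note that only the condition code is
// produced here; the caller branches on it:
//
//   markWord m = array->mark();
//   if (!(m & markWord::unlocked_value))   // header displaced by a lock
//     m = array->klass()->prototype_header();
//   ZF = ((m & markWord::null_free_array_bit_in_place) == 0);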
2032 
2033 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
2034   Label L_oops_equal;
2035   Label L_oops_not_equal;
2036   Label L_end;
2037 
2038   Register left  = op->left()->as_register();
2039   Register right = op->right()->as_register();
2040 
2041   __ cmpptr(left, right);
2042   __ jcc(Assembler::equal, L_oops_equal);
2043 
2044   // (1) Null check -- if one of the operands is null, the other must not be null (because
2045   //     the two references are not equal), so they are not substitutable.
2046   //     FIXME: do null check only if the operand is nullable
2047   __ testptr(left, right);
2048   __ jcc(Assembler::zero, L_oops_not_equal);
2049 
2050   ciKlass* left_klass = op->left_klass();
2051   ciKlass* right_klass = op->right_klass();
2052 
2053   // (2) Inline type check -- if either of the operands is not an inline type,
2054   //     they are not substitutable. We do this only if we are not sure that the
2055   //     operands are inline types.
2056   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
2057       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2058     Register tmp1  = op->tmp1()->as_register();
2059     __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2060     __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2061     __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2062     __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2063     __ jcc(Assembler::notEqual, L_oops_not_equal);
2064   }
2065 
2066   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2067   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
2068     // No need to load klass -- the operands are statically known to be the same inline klass.
2069     __ jmp(*op->stub()->entry());
2070   } else {
2071     Register left_klass_op = op->left_klass_op()->as_register();
2072     Register right_klass_op = op->right_klass_op()->as_register();
2073 
2074     if (UseCompressedClassPointers) {
2075       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2076       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2077       __ cmpl(left_klass_op, right_klass_op);
2078     } else {
2079       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2080       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2081       __ cmpptr(left_klass_op, right_klass_op);
2082     }
2083 
2084     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2085     // fall through to L_oops_not_equal
2086   }
2087 
2088   __ bind(L_oops_not_equal);
2089   move(op->not_equal_result(), op->result_opr());
2090   __ jmp(L_end);
2091 
2092   __ bind(L_oops_equal);
2093   move(op->equal_result(), op->result_opr());
2094   __ jmp(L_end);
2095 
2096   // We've returned from the stub. RAX contains 0x0 IFF the two
2097   // operands are not substitutable. (Don't compare against 0x1 in case the
2098   // C compiler is naughty)
2099   __ bind(*op->stub()->continuation());
2100   __ cmpl(rax, 0);
2101   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2102   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2103   // fall-through
2104   __ bind(L_end);
2105 }
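
// Header-pattern sketch (illustrative): step (2) accumulates a single AND
// over both mark words, so the result equals the pattern only if every
// pattern bit is set in *both* headers:
//
//   uintptr_t t = markWord::inline_type_pattern;
//   t &= left->mark();  t &= right->mark();
//   if (t != markWord::inline_type_pattern) goto L_oops_not_equal;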
2106 
2107 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2108   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
2109     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2110     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2111     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2112     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2113     Register addr = op->addr()->as_register();
2114     __ lock();
2115     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2116 
2117   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
2118     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2119     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2120     Register newval = op->new_value()->as_register();
2121     Register cmpval = op->cmp_value()->as_register();
2122     assert(cmpval == rax, "wrong register");
2123     assert(newval != NULL, "new val must be register");
2124     assert(cmpval != newval, "cmp and new values must be in different registers");
2125     assert(cmpval != addr, "cmp and addr must be in different registers");

2146       __ cmpxchgl(newval, Address(addr, 0));
2147     }
2148 #ifdef _LP64
2149   } else if (op->code() == lir_cas_long) {
2150     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2151     Register newval = op->new_value()->as_register_lo();
2152     Register cmpval = op->cmp_value()->as_register_lo();
2153     assert(cmpval == rax, "wrong register");
2154     assert(newval != NULL, "new val must be register");
2155     assert(cmpval != newval, "cmp and new values must be in different registers");
2156     assert(cmpval != addr, "cmp and addr must be in different registers");
2157     assert(newval != addr, "new value and addr must be in different registers");
2158     __ lock();
2159     __ cmpxchgq(newval, Address(addr, 0));
2160 #endif // _LP64
2161   } else {
2162     Unimplemented();
2163   }
2164 }
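
// lock cmpxchg semantics assumed above (sketch): the hardware performs,
// atomically,
//
//   if (*addr == rax) { *addr = newval; ZF = 1; }
//   else              { rax  = *addr;   ZF = 0; }
//
// The compare value is architecturally fixed to rax, which is why every
// branch asserts cmpval == rax.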
2165 
2166 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2167   assert(dst->is_cpu_register(), "must be");
2168   assert(dst->type() == src->type(), "must be");
2169 
2170   if (src->is_cpu_register()) {
2171     reg2reg(src, dst);
2172   } else if (src->is_stack()) {
2173     stack2reg(src, dst, dst->type());
2174   } else if (src->is_constant()) {
2175     const2reg(src, dst, lir_patch_none, NULL);
2176   } else {
2177     ShouldNotReachHere();
2178   }
2179 }
2180 
2181 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2182   Assembler::Condition acond, ncond;
2183   switch (condition) {
2184     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2185     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2186     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2187     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2188     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2189     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2190     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2191     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2192     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2193                                 ShouldNotReachHere();
2194   }
2195 
2196   if (opr1->is_cpu_register()) {
2197     reg2reg(opr1, result);
2198   } else if (opr1->is_stack()) {
2199     stack2reg(opr1, result, result->type());
2200   } else if (opr1->is_constant()) {

3025   int offset = __ offset();
3026   switch (code) {
3027   case lir_static_call:
3028   case lir_optvirtual_call:
3029   case lir_dynamic_call:
3030     offset += NativeCall::displacement_offset;
3031     break;
3032   case lir_icvirtual_call:
3033     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
3034     break;
3035   default: ShouldNotReachHere();
3036   }
3037   __ align(BytesPerWord, offset);
3038 }
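
// Padding sketch (illustrative): for a power-of-two BytesPerWord the nop
// count emitted by __ align(BytesPerWord, offset) is
// (-offset) & (BytesPerWord - 1), so the 4-byte call displacement starts
// word aligned and can be patched atomically.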
3039 
3040 
3041 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
3042   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
3043          "must be aligned");
3044   __ call(AddressLiteral(op->addr(), rtype));
3045   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3046 }
3047 
3048 
3049 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3050   __ ic_call(op->addr());
3051   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3052   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3053          "must be aligned");
3054 }
3055 
3056 
3057 void LIR_Assembler::emit_static_call_stub() {
3058   address call_pc = __ pc();
3059   address stub = __ start_a_stub(call_stub_size());
3060   if (stub == NULL) {
3061     bailout("static call stub overflow");
3062     return;
3063   }
3064 
3065   int start = __ offset();
3066 
3067   // make sure that the displacement word of the call ends up word aligned
3068   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
3069   __ relocate(static_stub_Relocation::spec(call_pc));
3070   __ mov_metadata(rbx, (Metadata*)NULL);
3071   // must be set to -1 at code generation time

3211   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3212 }
3213 
3214 
3215 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3216   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3217   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3218   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3219   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3220 }
3221 
3222 
3223 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3224   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3225   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3226   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3227   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3228 }
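
// Usage sketch: offsets are in machine words inside the reserved outgoing
// argument area, e.g. store_parameter(length, 2) stores length at
// Address(rsp, 2 * BytesPerWord).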
3229 
3230 
3231 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3232   if (null_check) {
3233     __ testptr(obj, obj);
3234     __ jcc(Assembler::zero, *slow_path->entry());
3235   }
3236   if (UseArrayMarkWordCheck) {
3237     if (is_dest) {
3238       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
3239     } else {
3240       __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
3241     }
3242   } else {
3243     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3244     __ load_klass(tmp, obj, tmp_load_klass);
3245     __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
3246     if (is_dest) {
3247       // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
3248       __ testl(tmp, Klass::_lh_null_free_array_bit_inplace);
3249     } else {
3250       __ testl(tmp, Klass::_lh_array_tag_flat_value_bit_inplace);
3251     }
3252     __ jcc(Assembler::notZero, *slow_path->entry());
3253   }
3254 }
3255 
3256 
3257 // This code replaces a call to arraycopy; no exceptions may
3258 // be thrown in this code; they must be thrown in the System.arraycopy
3259 // activation frame. We could save some checks if this were not the case.
3260 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3261   ciArrayKlass* default_type = op->expected_type();
3262   Register src = op->src()->as_register();
3263   Register dst = op->dst()->as_register();
3264   Register src_pos = op->src_pos()->as_register();
3265   Register dst_pos = op->dst_pos()->as_register();
3266   Register length  = op->length()->as_register();
3267   Register tmp = op->tmp()->as_register();
3268   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3269 
3270   CodeStub* stub = op->stub();
3271   int flags = op->flags();
3272   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3273   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3274 
3275   if (flags & LIR_OpArrayCopy::always_slow_path) {
3276     __ jmp(*stub->entry());
3277     __ bind(*stub->continuation());
3278     return;
3279   }
3280 
3281   // if we don't know anything, just go through the generic arraycopy
3282   if (default_type == NULL) {
3283     // save outgoing arguments on stack in case call to System.arraycopy is needed
3284     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3285     // for interpreter calling conventions. Now we have to do it in the new style
3286     // conventions. For the moment, until C1 gets the new register allocator, I just
3287     // force all the args to the right place (except the register args) and then on
3288     // the back side reload the register args properly if we take the slow path. Yuck!
3289 
3290     // These are proper for the calling convention
3291     store_parameter(length, 2);
3292     store_parameter(dst_pos, 1);
3293     store_parameter(dst, 0);
3294 
3295     // these are just temporary placements until we need to reload
3296     store_parameter(src_pos, 3);
3297     store_parameter(src, 4);
3298     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3299 
3300     address copyfunc_addr = StubRoutines::generic_arraycopy();

3354     __ mov(tmp, rax);
3355     __ xorl(tmp, -1);
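    // The generic stub is assumed to return 0 on success and
    // ~(number of elements copied) on a partial copy, so tmp now holds the
    // count already copied; adjust the arguments below so the slow path
    // only handles the remaining elements.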
3356 
3357     // Reload values from the stack so they are where the stub
3358     // expects them.
3359     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3360     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3361     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3362     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3363     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3364 
3365     __ subl(length, tmp);
3366     __ addl(src_pos, tmp);
3367     __ addl(dst_pos, tmp);
3368     __ jmp(*stub->entry());
3369 
3370     __ bind(*stub->continuation());
3371     return;
3372   }
3373 
3374   // Handle inline type arrays
3375   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
3376     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3377   }
3378   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
3379     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3380   }
3381 
3382   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3383 
3384   int elem_size = type2aelembytes(basic_type);
3385   Address::ScaleFactor scale;
3386 
3387   switch (elem_size) {
3388     case 1 :
3389       scale = Address::times_1;
3390       break;
3391     case 2 :
3392       scale = Address::times_2;
3393       break;
3394     case 4 :
3395       scale = Address::times_4;
3396       break;
3397     case 8 :
3398       scale = Address::times_8;
3399       break;
3400     default:
3401       scale = Address::no_scale;

3958         __ jccb(Assembler::zero, next);
3959 #endif
3960         // first time here. Set profile type.
3961         __ movptr(mdo_addr, tmp);
3962       } else {
3963         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3964                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3965 
3966         __ movptr(tmp, mdo_addr);
3967         __ testptr(tmp, TypeEntries::type_unknown);
3968         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3969 
3970         __ orptr(mdo_addr, TypeEntries::type_unknown);
3971       }
3972     }
3973 
3974     __ bind(next);
3975   }
3976 }
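
// Type-cell sketch (assumed layout): a profiled type cell holds a klass
// pointer with flag bits OR'ed into its low (alignment) bits, so a plain
// orptr(mdo_addr, TypeEntries::type_unknown) above can mark the cell as
// polluted without disturbing a concurrently stored klass pointer.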
3977 
3978 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3979   Register obj = op->obj()->as_register();
3980   Register tmp = op->tmp()->as_pointer_register();
3981   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3982   bool not_null = op->not_null();
3983   int flag = op->flag();
3984 
3985   Label not_inline_type;
3986   if (!not_null) {
3987     __ testptr(obj, obj);
3988     __ jccb(Assembler::zero, not_inline_type);
3989   }
3990 
3991   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3992 
3993   __ orb(mdo_addr, flag);
3994 
3995   __ bind(not_inline_type);
3996 }
3997 
3998 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3999   Unimplemented();
4000 }
4001 
4002 
4003 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
4004   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
4005 }
4006 
4007 
4008 void LIR_Assembler::align_backward_branch_target() {
4009   __ align(BytesPerWord);
4010 }
4011 
4012 
4013 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
4014   if (left->is_single_cpu()) {
4015     __ negl(left->as_register());
4016     move_regs(left->as_register(), dest->as_register());
4017 

4238 }
4239 
4240 void LIR_Assembler::membar_storeload() {
4241   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4242 }
4243 
4244 void LIR_Assembler::on_spin_wait() {
4245   __ pause ();
4246 }
4247 
4248 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4249   assert(result_reg->is_register(), "check");
4250 #ifdef _LP64
4251   // __ get_thread(result_reg->as_register_lo());
4252   __ mov(result_reg->as_register(), r15_thread);
4253 #else
4254   __ get_thread(result_reg->as_register());
4255 #endif // _LP64
4256 }
4257 
4258 void LIR_Assembler::check_orig_pc() {
4259   __ cmpptr(frame_map()->address_for_orig_pc_addr(), (int32_t)NULL_WORD);
4260 }
4261 
4262 void LIR_Assembler::peephole(LIR_List*) {
4263   // do nothing for now
4264 }
4265 
4266 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4267   assert(data == dest, "xchg/xadd uses only 2 operands");
4268 
4269   if (data->type() == T_INT) {
4270     if (code == lir_xadd) {
4271       __ lock();
4272       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4273     } else {
4274       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4275     }
4276   } else if (data->is_oop()) {
4277     assert (code == lir_xchg, "xadd for oops");
4278     Register obj = data->as_register();
4279 #ifdef _LP64
4280     if (UseCompressedOops) {