src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"

  35 #include "ci/ciInstance.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/gc_globals.hpp"
  39 #include "nativeInst_x86.hpp"

  40 #include "oops/objArrayKlass.hpp"
  41 #include "runtime/frame.inline.hpp"
  42 #include "runtime/safepointMechanism.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "runtime/stubRoutines.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 #include "vmreg_x86.inline.hpp"
  47 
  48 
  49 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  50 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  51 // fast versions of NegF/NegD and AbsF/AbsD.
  52 
  53 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  54 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  55   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  56   // of 128-bit operands for SSE instructions.
  57   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  58   // Store the value to a 128-bit operand.
  59   operand[0] = lo;

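// A minimal standalone sketch (illustration only, with hypothetical names) of the
// alignment-masking idea that double_quadword() relies on: over-allocate a buffer,
// then round an interior address down with &~0xF so the result is a 16-byte aligned
// slot that still lies inside the buffer.
#include <cstdint>
#include <cstdio>

static int64_t pool[4];   // extra room so that rounding down stays within the array

int main() {
  int64_t* operand = (int64_t*)(((intptr_t)&pool[1]) & ~(intptr_t)0xF);
  operand[0] = 0x7FFFFFFFFFFFFFFFLL;   // e.g. the AbsD mask: every bit except the sign bit
  operand[1] = 0x7FFFFFFFFFFFFFFFLL;
  printf("16-byte aligned operand at %p\n", (void*)operand);
  return 0;
}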
 178 
 179 void LIR_Assembler::ffree(int i) {
 180   __ ffree(i);
 181 }
 182 #endif // !_LP64
 183 
 184 void LIR_Assembler::breakpoint() {
 185   __ int3();
 186 }
 187 
 188 void LIR_Assembler::push(LIR_Opr opr) {
 189   if (opr->is_single_cpu()) {
 190     __ push_reg(opr->as_register());
 191   } else if (opr->is_double_cpu()) {
 192     NOT_LP64(__ push_reg(opr->as_register_hi()));
 193     __ push_reg(opr->as_register_lo());
 194   } else if (opr->is_stack()) {
 195     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 196   } else if (opr->is_constant()) {
 197     LIR_Const* const_opr = opr->as_constant_ptr();
 198     if (const_opr->type() == T_OBJECT) {
 199       __ push_oop(const_opr->as_jobject());
 200     } else if (const_opr->type() == T_INT) {
 201       __ push_jint(const_opr->as_jint());
 202     } else {
 203       ShouldNotReachHere();
 204     }
 205 
 206   } else {
 207     ShouldNotReachHere();
 208   }
 209 }
 210 
 211 void LIR_Assembler::pop(LIR_Opr opr) {
 212   if (opr->is_single_cpu()) {
 213     __ pop_reg(opr->as_register());
 214   } else {
 215     ShouldNotReachHere();
 216   }
 217 }
 218 

 465     __ bind(*stub->continuation());
 466   }
 467 
 468   if (compilation()->env()->dtrace_method_probes()) {
 469 #ifdef _LP64
 470     __ mov(rdi, r15_thread);
 471     __ mov_metadata(rsi, method()->constant_encoding());
 472 #else
 473     __ get_thread(rax);
 474     __ movptr(Address(rsp, 0), rax);
 475     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 476 #endif
 477     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 478   }
 479 
 480   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 481     __ mov(rax, rbx);  // Restore the exception
 482   }
 483 
 484   // remove the activation and dispatch to the unwind handler
 485   __ remove_frame(initial_frame_size_in_bytes());
 486   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 487 
 488   // Emit the slow path assembly
 489   if (stub != NULL) {
 490     stub->emit_code(this);
 491   }
 492 
 493   return offset;
 494 }
 495 
 496 
 497 int LIR_Assembler::emit_deopt_handler() {
 498   // if the last instruction is a call (typically to do a throw which
 499   // is coming at the end after block reordering) the return address
 500   // must still point into the code area in order to avoid assertion
 501   // failures when searching for the corresponding bci => add a nop
 502   // (was bug 5/14/1999 - gri)
 503   __ nop();
 504 
 505   // generate code for the deopt handler

 510     return -1;
 511   }
 512 
 513   int offset = code_offset();
 514   InternalAddress here(__ pc());
 515 
 516   __ pushptr(here.addr());
 517   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 518   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 519   __ end_a_stub();
 520 
 521   return offset;
 522 }
 523 
 524 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 525   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 526   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 527     assert(result->fpu() == 0, "result must already be on TOS");
 528   }
 529 
 530   // Pop the stack before the safepoint code
 531   __ remove_frame(initial_frame_size_in_bytes());
 532 
 533   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 534     __ reserved_stack_check();
 535   }
 536 
 537   // Note: we do not need to round double result; float result has the right precision
 538   // the poll sets the condition code, but no data registers
 539 
 540 #ifdef _LP64
 541   const Register thread = r15_thread;
 542 #else
 543   const Register thread = rbx;
 544   __ get_thread(thread);
 545 #endif
 546   code_stub->set_safepoint_offset(__ offset());
 547   __ relocate(relocInfo::poll_return_type);
 548   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 549   __ ret(0);
 550 }
 551 
 552 




 553 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 554   guarantee(info != NULL, "Shouldn't be NULL");
 555   int offset = __ offset();
 556 #ifdef _LP64
 557   const Register poll_addr = rscratch1;
 558   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 559 #else
 560   assert(tmp->is_cpu_register(), "needed");
 561   const Register poll_addr = tmp->as_register();
 562   __ get_thread(poll_addr);
 563   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 564 #endif
 565   add_debug_info_for_branch(info);
 566   __ relocate(relocInfo::poll_type);
 567   address pre_pc = __ pc();
 568   __ testl(rax, Address(poll_addr, 0));
 569   address post_pc = __ pc();
 570   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 571   return offset;
 572 }
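// Illustrative note on the guarantee above (assuming the usual LP64_ONLY/NOT_LP64
// macro definitions from globalDefinitions.hpp): LP64_ONLY(+1) only expands on
// 64-bit builds, so the check requires an exact poll length of 2 bytes on x86_32
// (test eax, [poll_addr]) and 3 bytes on x86_64, where the extra byte is the REX
// prefix needed to encode the r10-based (rscratch1) poll address.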

 593       break;
 594     }
 595 
 596     case T_ADDRESS: {
 597       assert(patch_code == lir_patch_none, "no patching handled here");
 598       __ movptr(dest->as_register(), c->as_jint());
 599       break;
 600     }
 601 
 602     case T_LONG: {
 603       assert(patch_code == lir_patch_none, "no patching handled here");
 604 #ifdef _LP64
 605       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 606 #else
 607       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 608       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 609 #endif // _LP64
 610       break;
 611     }
 612 

 613     case T_OBJECT: {
 614       if (patch_code != lir_patch_none) {
 615         jobject2reg_with_patching(dest->as_register(), info);
 616       } else {
 617         __ movoop(dest->as_register(), c->as_jobject());
 618       }
 619       break;
 620     }
 621 
 622     case T_METADATA: {
 623       if (patch_code != lir_patch_none) {
 624         klass2reg_with_patching(dest->as_register(), info);
 625       } else {
 626         __ mov_metadata(dest->as_register(), c->as_metadata());
 627       }
 628       break;
 629     }
 630 
 631     case T_FLOAT: {
 632       if (dest->is_single_xmm()) {

 683     default:
 684       ShouldNotReachHere();
 685   }
 686 }
 687 
 688 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 689   assert(src->is_constant(), "should not call otherwise");
 690   assert(dest->is_stack(), "should not call otherwise");
 691   LIR_Const* c = src->as_constant_ptr();
 692 
 693   switch (c->type()) {
 694     case T_INT:  // fall through
 695     case T_FLOAT:
 696       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 697       break;
 698 
 699     case T_ADDRESS:
 700       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 701       break;
 702 

 703     case T_OBJECT:
 704       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 705       break;
 706 
 707     case T_LONG:  // fall through
 708     case T_DOUBLE:
 709 #ifdef _LP64
 710       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 711                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 712 #else
 713       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 714                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 715       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 716                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 717 #endif // _LP64
 718       break;
 719 
 720     default:
 721       ShouldNotReachHere();
 722   }
 723 }
 724 
 725 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 726   assert(src->is_constant(), "should not call otherwise");
 727   assert(dest->is_address(), "should not call otherwise");
 728   LIR_Const* c = src->as_constant_ptr();
 729   LIR_Address* addr = dest->as_address_ptr();
 730 
 731   int null_check_here = code_offset();
 732   switch (type) {
 733     case T_INT:    // fall through
 734     case T_FLOAT:
 735       __ movl(as_Address(addr), c->as_jint_bits());
 736       break;
 737 
 738     case T_ADDRESS:
 739       __ movptr(as_Address(addr), c->as_jint_bits());
 740       break;
 741 

 742     case T_OBJECT:  // fall through
 743     case T_ARRAY:
 744       if (c->as_jobject() == NULL) {
 745         if (UseCompressedOops && !wide) {
 746           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 747         } else {
 748 #ifdef _LP64
 749           __ xorptr(rscratch1, rscratch1);
 750           null_check_here = code_offset();
 751           __ movptr(as_Address(addr), rscratch1);
 752 #else
 753           __ movptr(as_Address(addr), NULL_WORD);
 754 #endif
 755         }
 756       } else {
 757         if (is_literal_address(addr)) {
 758           ShouldNotReachHere();
 759           __ movoop(as_Address(addr, noreg), c->as_jobject());
 760         } else {
 761 #ifdef _LP64

 810   if (info != NULL) {
 811     add_debug_info_for_null_check(null_check_here, info);
 812   }
 813 }
 814 
 815 
 816 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 817   assert(src->is_register(), "should not call otherwise");
 818   assert(dest->is_register(), "should not call otherwise");
 819 
 820   // move between cpu-registers
 821   if (dest->is_single_cpu()) {
 822 #ifdef _LP64
 823     if (src->type() == T_LONG) {
 824       // Can do LONG -> OBJECT
 825       move_regs(src->as_register_lo(), dest->as_register());
 826       return;
 827     }
 828 #endif
 829     assert(src->is_single_cpu(), "must match");
 830     if (src->type() == T_OBJECT) {
 831       __ verify_oop(src->as_register());
 832     }
 833     move_regs(src->as_register(), dest->as_register());
 834 
 835   } else if (dest->is_double_cpu()) {
 836 #ifdef _LP64
 837     if (is_reference_type(src->type())) {
 838       // Surprising to me, but we can see a move of a long to T_OBJECT
 839       __ verify_oop(src->as_register());
 840       move_regs(src->as_register(), dest->as_register_lo());
 841       return;
 842     }
 843 #endif
 844     assert(src->is_double_cpu(), "must match");
 845     Register f_lo = src->as_register_lo();
 846     Register f_hi = src->as_register_hi();
 847     Register t_lo = dest->as_register_lo();
 848     Register t_hi = dest->as_register_hi();
 849 #ifdef _LP64
 850     assert(f_hi == f_lo, "must be same");

 996       break;
 997     }
 998 
 999     case T_DOUBLE: {
1000 #ifdef _LP64
1001       assert(src->is_double_xmm(), "not a double");
1002       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1003 #else
1004       if (src->is_double_xmm()) {
1005         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1006       } else {
1007         assert(src->is_double_fpu(), "must be");
1008         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1009         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1010         else                    __ fst_d (as_Address(to_addr));
1011       }
1012 #endif // _LP64
1013       break;
1014     }
1015 

1016     case T_ARRAY:   // fall through
1017     case T_OBJECT:  // fall through
1018       if (UseCompressedOops && !wide) {
1019         __ movl(as_Address(to_addr), compressed_src);
1020       } else {
1021         __ movptr(as_Address(to_addr), src->as_register());
1022       }
1023       break;
1024     case T_METADATA:
1025       // We get here to store a method pointer to the stack to pass to
1026       // a dtrace runtime call. This can't work on 64 bit with
1027       // compressed klass ptrs: T_METADATA can be a compressed klass
1028       // ptr or a 64 bit method pointer.
1029       LP64_ONLY(ShouldNotReachHere());
1030       __ movptr(as_Address(to_addr), src->as_register());
1031       break;
1032     case T_ADDRESS:
1033       __ movptr(as_Address(to_addr), src->as_register());
1034       break;
1035     case T_INT:

1169     // push and pop the part at src + wordSize, adding wordSize for the previous push
1170     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1171     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1172     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1173 #endif // _LP64
1174 
1175   } else {
1176     ShouldNotReachHere();
1177   }
1178 }
1179 
1180 
1181 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1182   assert(src->is_address(), "should not call otherwise");
1183   assert(dest->is_register(), "should not call otherwise");
1184 
1185   LIR_Address* addr = src->as_address_ptr();
1186   Address from_addr = as_Address(addr);
1187   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1188 
1189   if (addr->base()->type() == T_OBJECT) {
1190     __ verify_oop(addr->base()->as_pointer_register());
1191   }
1192 
1193   switch (type) {
1194     case T_BOOLEAN: // fall through
1195     case T_BYTE:    // fall through
1196     case T_CHAR:    // fall through
1197     case T_SHORT:
1198       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1199         // on pre P6 processors we may get partial register stalls
1200         // so blow away the value of to_rinfo before loading a
1201         // partial word into it.  Do it here so that it precedes
1202         // the potential patch point below.
1203         __ xorptr(dest->as_register(), dest->as_register());
1204       }
1205       break;
1206    default:
1207      break;
1208   }
1209 

1230 #endif // !LP64
1231       }
1232       break;
1233     }
1234 
1235     case T_DOUBLE: {
1236       if (dest->is_double_xmm()) {
1237         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1238       } else {
1239 #ifndef _LP64
1240         assert(dest->is_double_fpu(), "must be");
1241         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1242         __ fld_d(from_addr);
1243 #else
1244         ShouldNotReachHere();
1245 #endif // !LP64
1246       }
1247       break;
1248     }
1249 

1250     case T_OBJECT:  // fall through
1251     case T_ARRAY:   // fall through
1252       if (UseCompressedOops && !wide) {
1253         __ movl(dest->as_register(), from_addr);
1254       } else {
1255         __ movptr(dest->as_register(), from_addr);
1256       }
1257       break;
1258 
1259     case T_ADDRESS:
1260       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1261         __ movl(dest->as_register(), from_addr);
1262       } else {
1263         __ movptr(dest->as_register(), from_addr);
1264       }
1265       break;
1266     case T_INT:
1267       __ movl(dest->as_register(), from_addr);
1268       break;
1269 

1616     add_debug_info_for_null_check_here(op->stub()->info());
1617     __ cmpb(Address(op->klass()->as_register(),
1618                     InstanceKlass::init_state_offset()),
1619                     InstanceKlass::fully_initialized);
1620     __ jcc(Assembler::notEqual, *op->stub()->entry());
1621   }
1622   __ allocate_object(op->obj()->as_register(),
1623                      op->tmp1()->as_register(),
1624                      op->tmp2()->as_register(),
1625                      op->header_size(),
1626                      op->object_size(),
1627                      op->klass()->as_register(),
1628                      *op->stub()->entry());
1629   __ bind(*op->stub()->continuation());
1630 }
1631 
1632 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1633   Register len =  op->len()->as_register();
1634   LP64_ONLY( __ movslq(len, len); )
1635 
1636   if (UseSlowPath ||
1637       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1638       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1639     __ jmp(*op->stub()->entry());
1640   } else {
1641     Register tmp1 = op->tmp1()->as_register();
1642     Register tmp2 = op->tmp2()->as_register();
1643     Register tmp3 = op->tmp3()->as_register();
1644     if (len == tmp1) {
1645       tmp1 = tmp3;
1646     } else if (len == tmp2) {
1647       tmp2 = tmp3;
1648     } else if (len == tmp3) {
1649       // everything is ok
1650     } else {
1651       __ mov(tmp3, len);
1652     }
1653     __ allocate_array(op->obj()->as_register(),
1654                       len,
1655                       tmp1,
1656                       tmp2,

1715     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1716   }
1717   Label profile_cast_success, profile_cast_failure;
1718   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1719   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1720 
1721   if (obj == k_RInfo) {
1722     k_RInfo = dst;
1723   } else if (obj == klass_RInfo) {
1724     klass_RInfo = dst;
1725   }
1726   if (k->is_loaded() && !UseCompressedClassPointers) {
1727     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1728   } else {
1729     Rtmp1 = op->tmp3()->as_register();
1730     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1731   }
1732 
1733   assert_different_registers(obj, k_RInfo, klass_RInfo);
1734 
1735   __ cmpptr(obj, (int32_t)NULL_WORD);
1736   if (op->should_profile()) {
1737     Label not_null;
1738     __ jccb(Assembler::notEqual, not_null);
1739     // Object is null; update MDO and exit
1740     Register mdo  = klass_RInfo;
1741     __ mov_metadata(mdo, md->constant_encoding());
1742     Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1743     int header_bits = BitData::null_seen_byte_constant();
1744     __ orb(data_addr, header_bits);
1745     __ jmp(*obj_is_null);
1746     __ bind(not_null);
1747   } else {
1748     __ jcc(Assembler::equal, *obj_is_null);


1749   }
1750 
1751   if (!k->is_loaded()) {
1752     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1753   } else {
1754 #ifdef _LP64
1755     __ mov_metadata(k_RInfo, k->constant_encoding());
1756 #endif // _LP64
1757   }
1758   __ verify_oop(obj);
1759 
1760   if (op->fast_check()) {
1761     // get object class
1762     // not a safepoint as obj null check happens earlier
1763 #ifdef _LP64
1764     if (UseCompressedClassPointers) {
1765       __ load_klass(Rtmp1, obj, tmp_load_klass);
1766       __ cmpptr(k_RInfo, Rtmp1);
1767     } else {
1768       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1940         __ mov(dst, obj);
1941       }
1942     } else
1943       if (code == lir_instanceof) {
1944         Register obj = op->object()->as_register();
1945         Register dst = op->result_opr()->as_register();
1946         Label success, failure, done;
1947         emit_typecheck_helper(op, &success, &failure, &failure);
1948         __ bind(failure);
1949         __ xorptr(dst, dst);
1950         __ jmpb(done);
1951         __ bind(success);
1952         __ movptr(dst, 1);
1953         __ bind(done);
1954       } else {
1955         ShouldNotReachHere();
1956       }
1957 
1958 }
1959 
1960 
1961 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1962   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1963     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1964     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1965     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1966     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
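          // The hard register constraints asserted above come straight from the x86
          // CMPXCHG8B instruction, which implicitly compares EDX:EAX with the memory
          // operand and, on a match, stores ECX:EBX into it.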
1967     Register addr = op->addr()->as_register();
1968     __ lock();
1969     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1970 
1971   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1972     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1973     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1974     Register newval = op->new_value()->as_register();
1975     Register cmpval = op->cmp_value()->as_register();
1976     assert(cmpval == rax, "wrong register");
1977     assert(newval != NULL, "new val must be register");
1978     assert(cmpval != newval, "cmp and new values must be in different registers");
1979     assert(cmpval != addr, "cmp and addr must be in different registers");

2000       __ cmpxchgl(newval, Address(addr, 0));
2001     }
2002 #ifdef _LP64
2003   } else if (op->code() == lir_cas_long) {
2004     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2005     Register newval = op->new_value()->as_register_lo();
2006     Register cmpval = op->cmp_value()->as_register_lo();
2007     assert(cmpval == rax, "wrong register");
2008     assert(newval != NULL, "new val must be register");
2009     assert(cmpval != newval, "cmp and new values must be in different registers");
2010     assert(cmpval != addr, "cmp and addr must be in different registers");
2011     assert(newval != addr, "new value and addr must be in different registers");
2012     __ lock();
2013     __ cmpxchgq(newval, Address(addr, 0));
2014 #endif // _LP64
2015   } else {
2016     Unimplemented();
2017   }
2018 }
2019 
2020 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2021   Assembler::Condition acond, ncond;
2022   switch (condition) {
2023     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2024     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2025     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2026     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2027     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2028     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2029     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2030     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2031     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2032                                 ShouldNotReachHere();
2033   }
2034 
2035   if (opr1->is_cpu_register()) {
2036     reg2reg(opr1, result);
2037   } else if (opr1->is_stack()) {
2038     stack2reg(opr1, result, result->type());
2039   } else if (opr1->is_constant()) {

2864   int offset = __ offset();
2865   switch (code) {
2866   case lir_static_call:
2867   case lir_optvirtual_call:
2868   case lir_dynamic_call:
2869     offset += NativeCall::displacement_offset;
2870     break;
2871   case lir_icvirtual_call:
2872     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2873     break;
2874   default: ShouldNotReachHere();
2875   }
2876   __ align(BytesPerWord, offset);
2877 }
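// Presumably the extra offset passed to align() above exists so that the 4-byte
// displacement of the subsequent call ends up word aligned (the static call stub
// below states the same requirement), which lets the call target be patched
// atomically while other threads may be executing this code.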
2878 
2879 
2880 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2881   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2882          "must be aligned");
2883   __ call(AddressLiteral(op->addr(), rtype));
2884   add_call_info(code_offset(), op->info());
2885 }
2886 
2887 
2888 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2889   __ ic_call(op->addr());
2890   add_call_info(code_offset(), op->info());
2891   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2892          "must be aligned");
2893 }
2894 
2895 
2896 void LIR_Assembler::emit_static_call_stub() {
2897   address call_pc = __ pc();
2898   address stub = __ start_a_stub(call_stub_size());
2899   if (stub == NULL) {
2900     bailout("static call stub overflow");
2901     return;
2902   }
2903 
2904   int start = __ offset();
2905 
2906   // make sure that the displacement word of the call ends up word aligned
2907   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2908   __ relocate(static_stub_Relocation::spec(call_pc));
2909   __ mov_metadata(rbx, (Metadata*)NULL);
2910   // must be set to -1 at code generation time

3050   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3051 }
3052 
3053 
3054 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3055   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3056   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3057   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3058   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3059 }
3060 
3061 
3062 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3063   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3064   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3065   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3066   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3067 }
3068 
3069 
3070 // This code replaces a call to arraycopy; no exceptions may
3071 // be thrown in this code; they must be thrown in the System.arraycopy
3072 // activation frame; we could save some checks if this were not the case
3073 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3074   ciArrayKlass* default_type = op->expected_type();
3075   Register src = op->src()->as_register();
3076   Register dst = op->dst()->as_register();
3077   Register src_pos = op->src_pos()->as_register();
3078   Register dst_pos = op->dst_pos()->as_register();
3079   Register length  = op->length()->as_register();
3080   Register tmp = op->tmp()->as_register();
3081   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3082 
3083   CodeStub* stub = op->stub();
3084   int flags = op->flags();
3085   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3086   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3087 






3088   // if we don't know anything, just go through the generic arraycopy
3089   if (default_type == NULL) {
3090     // save outgoing arguments on stack in case call to System.arraycopy is needed
3091     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3092     // for interpreter calling conventions. Now we have to do it in new style conventions.
3093     // For the moment until C1 gets the new register allocator I just force all the
3094     // args to the right place (except the register args) and then on the back side
3095     // reload the register args properly if we go slow path. Yuck
3096 
3097     // These are proper for the calling convention
3098     store_parameter(length, 2);
3099     store_parameter(dst_pos, 1);
3100     store_parameter(dst, 0);
3101 
3102     // these are just temporary placements until we need to reload
3103     store_parameter(src_pos, 3);
3104     store_parameter(src, 4);
3105     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3106 
3107     address copyfunc_addr = StubRoutines::generic_arraycopy();

3161     __ mov(tmp, rax);
3162     __ xorl(tmp, -1);
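         // Presumably (per the generic_arraycopy stub contract) rax is 0 on success and
         // otherwise the bitwise complement of the number of elements already copied, so
         // xor-ing with -1 recovers that count; it is used below to advance src_pos and
         // dst_pos and to shrink length before falling back to the slow path.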
3163 
3164     // Reload values from the stack so they are where the stub
3165     // expects them.
3166     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3167     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3168     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3169     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3170     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3171 
3172     __ subl(length, tmp);
3173     __ addl(src_pos, tmp);
3174     __ addl(dst_pos, tmp);
3175     __ jmp(*stub->entry());
3176 
3177     __ bind(*stub->continuation());
3178     return;
3179   }
3180 
3181   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3182 
3183   int elem_size = type2aelembytes(basic_type);
3184   Address::ScaleFactor scale;
3185 
3186   switch (elem_size) {
3187     case 1 :
3188       scale = Address::times_1;
3189       break;
3190     case 2 :
3191       scale = Address::times_2;
3192       break;
3193     case 4 :
3194       scale = Address::times_4;
3195       break;
3196     case 8 :
3197       scale = Address::times_8;
3198       break;
3199     default:
3200       scale = Address::no_scale;

3740         __ jccb(Assembler::zero, next);
3741 #endif
3742         // first time here. Set profile type.
3743         __ movptr(mdo_addr, tmp);
3744       } else {
3745         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3746                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3747 
3748         __ movptr(tmp, mdo_addr);
3749         __ testptr(tmp, TypeEntries::type_unknown);
3750         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3751 
3752         __ orptr(mdo_addr, TypeEntries::type_unknown);
3753       }
3754     }
3755 
3756     __ bind(next);
3757   }
3758 }
3759 
3760 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3761   Unimplemented();
3762 }
3763 
3764 
3765 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3766   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3767 }
3768 
3769 
3770 void LIR_Assembler::align_backward_branch_target() {
3771   __ align(BytesPerWord);
3772 }
3773 
3774 
3775 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3776   if (left->is_single_cpu()) {
3777     __ negl(left->as_register());
3778     move_regs(left->as_register(), dest->as_register());
3779 

4000 }
4001 
4002 void LIR_Assembler::membar_storeload() {
4003   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4004 }
4005 
4006 void LIR_Assembler::on_spin_wait() {
4007   __ pause ();
4008 }
4009 
4010 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4011   assert(result_reg->is_register(), "check");
4012 #ifdef _LP64
4013   // __ get_thread(result_reg->as_register_lo());
4014   __ mov(result_reg->as_register(), r15_thread);
4015 #else
4016   __ get_thread(result_reg->as_register());
4017 #endif // _LP64
4018 }
4019 



4020 
4021 void LIR_Assembler::peephole(LIR_List*) {
4022   // do nothing for now
4023 }
4024 
4025 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4026   assert(data == dest, "xchg/xadd uses only 2 operands");
4027 
4028   if (data->type() == T_INT) {
4029     if (code == lir_xadd) {
4030       __ lock();
4031       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4032     } else {
4033       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4034     }
4035   } else if (data->is_oop()) {
4036     assert (code == lir_xchg, "xadd for oops");
4037     Register obj = data->as_register();
4038 #ifdef _LP64
4039     if (UseCompressedOops) {

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_CodeStubs.hpp"
  29 #include "c1/c1_Compilation.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_MacroAssembler.hpp"
  32 #include "c1/c1_Runtime1.hpp"
  33 #include "c1/c1_ValueStack.hpp"
  34 #include "ci/ciArrayKlass.hpp"
  35 #include "ci/ciInlineKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "compiler/oopMap.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "gc/shared/gc_globals.hpp"
  40 #include "nativeInst_x86.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "utilities/powerOfTwo.hpp"
  48 #include "vmreg_x86.inline.hpp"
  49 
  50 
  51 // These masks are used to provide 128-bit aligned bitmasks to the XMM
  52 // instructions, to allow sign-masking or sign-bit flipping.  They allow
  53 // fast versions of NegF/NegD and AbsF/AbsD.
  54 
  55 // Note: 'double' and 'long long' have 32-bit alignment on x86.
  56 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  57   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  58   // of 128-bit operands for SSE instructions.
  59   jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  60   // Store the value to a 128-bit operand.
  61   operand[0] = lo;

 180 
 181 void LIR_Assembler::ffree(int i) {
 182   __ ffree(i);
 183 }
 184 #endif // !_LP64
 185 
 186 void LIR_Assembler::breakpoint() {
 187   __ int3();
 188 }
 189 
 190 void LIR_Assembler::push(LIR_Opr opr) {
 191   if (opr->is_single_cpu()) {
 192     __ push_reg(opr->as_register());
 193   } else if (opr->is_double_cpu()) {
 194     NOT_LP64(__ push_reg(opr->as_register_hi()));
 195     __ push_reg(opr->as_register_lo());
 196   } else if (opr->is_stack()) {
 197     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 198   } else if (opr->is_constant()) {
 199     LIR_Const* const_opr = opr->as_constant_ptr();
 200     if (const_opr->type() == T_OBJECT || const_opr->type() == T_INLINE_TYPE) {
 201       __ push_oop(const_opr->as_jobject());
 202     } else if (const_opr->type() == T_INT) {
 203       __ push_jint(const_opr->as_jint());
 204     } else {
 205       ShouldNotReachHere();
 206     }
 207 
 208   } else {
 209     ShouldNotReachHere();
 210   }
 211 }
 212 
 213 void LIR_Assembler::pop(LIR_Opr opr) {
 214   if (opr->is_single_cpu()) {
 215     __ pop_reg(opr->as_register());
 216   } else {
 217     ShouldNotReachHere();
 218   }
 219 }
 220 

 467     __ bind(*stub->continuation());
 468   }
 469 
 470   if (compilation()->env()->dtrace_method_probes()) {
 471 #ifdef _LP64
 472     __ mov(rdi, r15_thread);
 473     __ mov_metadata(rsi, method()->constant_encoding());
 474 #else
 475     __ get_thread(rax);
 476     __ movptr(Address(rsp, 0), rax);
 477     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 478 #endif
 479     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 480   }
 481 
 482   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 483     __ mov(rax, rbx);  // Restore the exception
 484   }
 485 
 486   // remove the activation and dispatch to the unwind handler
 487   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 488   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 489 
 490   // Emit the slow path assembly
 491   if (stub != NULL) {
 492     stub->emit_code(this);
 493   }
 494 
 495   return offset;
 496 }
 497 
 498 
 499 int LIR_Assembler::emit_deopt_handler() {
 500   // if the last instruction is a call (typically to do a throw which
 501   // is coming at the end after block reordering) the return address
 502   // must still point into the code area in order to avoid assertion
 503   // failures when searching for the corresponding bci => add a nop
 504   // (was bug 5/14/1999 - gri)
 505   __ nop();
 506 
 507   // generate code for the deopt handler

 512     return -1;
 513   }
 514 
 515   int offset = code_offset();
 516   InternalAddress here(__ pc());
 517 
 518   __ pushptr(here.addr());
 519   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 520   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 521   __ end_a_stub();
 522 
 523   return offset;
 524 }
 525 
 526 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
 527   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 528   if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 529     assert(result->fpu() == 0, "result must already be on TOS");
 530   }
 531 
 532   ciMethod* method = compilation()->method();
 533   if (InlineTypeReturnedAsFields && method->signature()->returns_null_free_inline_type()) {
 534     ciInlineKlass* vk = method->return_type()->as_inline_klass();
 535     if (vk->can_be_returned_as_fields()) {
 536 #ifndef _LP64
 537       Unimplemented();
 538 #else
 539       address unpack_handler = vk->unpack_handler();
 540       assert(unpack_handler != NULL, "must be");
 541       __ call(RuntimeAddress(unpack_handler));
 542       // At this point, rax points to the value object (for interpreter or C1 caller).
 543       // The fields of the object are copied into registers (for C2 caller).
 544 #endif
 545     }
 546   }
 547 
 548   // Pop the stack before the safepoint code
 549   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 550 
 551   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 552     __ reserved_stack_check();
 553   }
 554 
 555   // Note: we do not need to round double result; float result has the right precision
 556   // the poll sets the condition code, but no data registers
 557 
 558 #ifdef _LP64
 559   const Register thread = r15_thread;
 560 #else
 561   const Register thread = rbx;
 562   __ get_thread(thread);
 563 #endif
 564   code_stub->set_safepoint_offset(__ offset());
 565   __ relocate(relocInfo::poll_return_type);
 566   __ safepoint_poll(*code_stub->entry(), thread, true /* at_return */, true /* in_nmethod */);
 567   __ ret(0);
 568 }
 569 
 570 
 571 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
 572   return (__ store_inline_type_fields_to_buf(vk, false));
 573 }
 574 
 575 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 576   guarantee(info != NULL, "Shouldn't be NULL");
 577   int offset = __ offset();
 578 #ifdef _LP64
 579   const Register poll_addr = rscratch1;
 580   __ movptr(poll_addr, Address(r15_thread, JavaThread::polling_page_offset()));
 581 #else
 582   assert(tmp->is_cpu_register(), "needed");
 583   const Register poll_addr = tmp->as_register();
 584   __ get_thread(poll_addr);
 585   __ movptr(poll_addr, Address(poll_addr, in_bytes(JavaThread::polling_page_offset())));
 586 #endif
 587   add_debug_info_for_branch(info);
 588   __ relocate(relocInfo::poll_type);
 589   address pre_pc = __ pc();
 590   __ testl(rax, Address(poll_addr, 0));
 591   address post_pc = __ pc();
 592   guarantee(pointer_delta(post_pc, pre_pc, 1) == 2 LP64_ONLY(+1), "must be exact length");
 593   return offset;
 594 }

 615       break;
 616     }
 617 
 618     case T_ADDRESS: {
 619       assert(patch_code == lir_patch_none, "no patching handled here");
 620       __ movptr(dest->as_register(), c->as_jint());
 621       break;
 622     }
 623 
 624     case T_LONG: {
 625       assert(patch_code == lir_patch_none, "no patching handled here");
 626 #ifdef _LP64
 627       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 628 #else
 629       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 630       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 631 #endif // _LP64
 632       break;
 633     }
 634 
 635     case T_INLINE_TYPE: // Fall through
 636     case T_OBJECT: {
 637       if (patch_code != lir_patch_none) {
 638         jobject2reg_with_patching(dest->as_register(), info);
 639       } else {
 640         __ movoop(dest->as_register(), c->as_jobject());
 641       }
 642       break;
 643     }
 644 
 645     case T_METADATA: {
 646       if (patch_code != lir_patch_none) {
 647         klass2reg_with_patching(dest->as_register(), info);
 648       } else {
 649         __ mov_metadata(dest->as_register(), c->as_metadata());
 650       }
 651       break;
 652     }
 653 
 654     case T_FLOAT: {
 655       if (dest->is_single_xmm()) {

 706     default:
 707       ShouldNotReachHere();
 708   }
 709 }
 710 
 711 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 712   assert(src->is_constant(), "should not call otherwise");
 713   assert(dest->is_stack(), "should not call otherwise");
 714   LIR_Const* c = src->as_constant_ptr();
 715 
 716   switch (c->type()) {
 717     case T_INT:  // fall through
 718     case T_FLOAT:
 719       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 720       break;
 721 
 722     case T_ADDRESS:
 723       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 724       break;
 725 
 726     case T_INLINE_TYPE: // Fall through
 727     case T_OBJECT:
 728       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 729       break;
 730 
 731     case T_LONG:  // fall through
 732     case T_DOUBLE:
 733 #ifdef _LP64
 734       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 735                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 736 #else
 737       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 738                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 739       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 740                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 741 #endif // _LP64
 742       break;
 743 
 744     default:
 745       ShouldNotReachHere();
 746   }
 747 }
 748 
 749 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 750   assert(src->is_constant(), "should not call otherwise");
 751   assert(dest->is_address(), "should not call otherwise");
 752   LIR_Const* c = src->as_constant_ptr();
 753   LIR_Address* addr = dest->as_address_ptr();
 754 
 755   int null_check_here = code_offset();
 756   switch (type) {
 757     case T_INT:    // fall through
 758     case T_FLOAT:
 759       __ movl(as_Address(addr), c->as_jint_bits());
 760       break;
 761 
 762     case T_ADDRESS:
 763       __ movptr(as_Address(addr), c->as_jint_bits());
 764       break;
 765 
 766     case T_INLINE_TYPE: // fall through
 767     case T_OBJECT:  // fall through
 768     case T_ARRAY:
 769       if (c->as_jobject() == NULL) {
 770         if (UseCompressedOops && !wide) {
 771           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 772         } else {
 773 #ifdef _LP64
 774           __ xorptr(rscratch1, rscratch1);
 775           null_check_here = code_offset();
 776           __ movptr(as_Address(addr), rscratch1);
 777 #else
 778           __ movptr(as_Address(addr), NULL_WORD);
 779 #endif
 780         }
 781       } else {
 782         if (is_literal_address(addr)) {
 783           ShouldNotReachHere();
 784           __ movoop(as_Address(addr, noreg), c->as_jobject());
 785         } else {
 786 #ifdef _LP64

 835   if (info != NULL) {
 836     add_debug_info_for_null_check(null_check_here, info);
 837   }
 838 }
 839 
 840 
 841 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 842   assert(src->is_register(), "should not call otherwise");
 843   assert(dest->is_register(), "should not call otherwise");
 844 
 845   // move between cpu-registers
 846   if (dest->is_single_cpu()) {
 847 #ifdef _LP64
 848     if (src->type() == T_LONG) {
 849       // Can do LONG -> OBJECT
 850       move_regs(src->as_register_lo(), dest->as_register());
 851       return;
 852     }
 853 #endif
 854     assert(src->is_single_cpu(), "must match");
 855     if (src->type() == T_OBJECT || src->type() == T_INLINE_TYPE) {
 856       __ verify_oop(src->as_register());
 857     }
 858     move_regs(src->as_register(), dest->as_register());
 859 
 860   } else if (dest->is_double_cpu()) {
 861 #ifdef _LP64
 862     if (is_reference_type(src->type())) {
 863       // Surprising to me, but we can see a move of a long to T_OBJECT
 864       __ verify_oop(src->as_register());
 865       move_regs(src->as_register(), dest->as_register_lo());
 866       return;
 867     }
 868 #endif
 869     assert(src->is_double_cpu(), "must match");
 870     Register f_lo = src->as_register_lo();
 871     Register f_hi = src->as_register_hi();
 872     Register t_lo = dest->as_register_lo();
 873     Register t_hi = dest->as_register_hi();
 874 #ifdef _LP64
 875     assert(f_hi == f_lo, "must be same");

1021       break;
1022     }
1023 
1024     case T_DOUBLE: {
1025 #ifdef _LP64
1026       assert(src->is_double_xmm(), "not a double");
1027       __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1028 #else
1029       if (src->is_double_xmm()) {
1030         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1031       } else {
1032         assert(src->is_double_fpu(), "must be");
1033         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1034         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1035         else                    __ fst_d (as_Address(to_addr));
1036       }
1037 #endif // _LP64
1038       break;
1039     }
1040 
1041     case T_INLINE_TYPE: // fall through
1042     case T_ARRAY:   // fall through
1043     case T_OBJECT:  // fall through
1044       if (UseCompressedOops && !wide) {
1045         __ movl(as_Address(to_addr), compressed_src);
1046       } else {
1047         __ movptr(as_Address(to_addr), src->as_register());
1048       }
1049       break;
1050     case T_METADATA:
1051       // We get here to store a method pointer to the stack to pass to
1052       // a dtrace runtime call. This can't work on 64 bit with
1053       // compressed klass ptrs: T_METADATA can be a compressed klass
1054       // ptr or a 64 bit method pointer.
1055       LP64_ONLY(ShouldNotReachHere());
1056       __ movptr(as_Address(to_addr), src->as_register());
1057       break;
1058     case T_ADDRESS:
1059       __ movptr(as_Address(to_addr), src->as_register());
1060       break;
1061     case T_INT:

1195     // push and pop the part at src + wordSize, adding wordSize for the previous push
1196     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1197     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1198     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1199 #endif // _LP64
1200 
1201   } else {
1202     ShouldNotReachHere();
1203   }
1204 }
1205 
1206 
1207 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide) {
1208   assert(src->is_address(), "should not call otherwise");
1209   assert(dest->is_register(), "should not call otherwise");
1210 
1211   LIR_Address* addr = src->as_address_ptr();
1212   Address from_addr = as_Address(addr);
1213   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1214 
1215   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_INLINE_TYPE) {
1216     __ verify_oop(addr->base()->as_pointer_register());
1217   }
1218 
1219   switch (type) {
1220     case T_BOOLEAN: // fall through
1221     case T_BYTE:    // fall through
1222     case T_CHAR:    // fall through
1223     case T_SHORT:
1224       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1225         // on pre P6 processors we may get partial register stalls
1226         // so blow away the value of to_rinfo before loading a
1227         // partial word into it.  Do it here so that it precedes
1228         // the potential patch point below.
1229         __ xorptr(dest->as_register(), dest->as_register());
1230       }
1231       break;
1232    default:
1233      break;
1234   }
1235 

1256 #endif // !LP64
1257       }
1258       break;
1259     }
1260 
1261     case T_DOUBLE: {
1262       if (dest->is_double_xmm()) {
1263         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1264       } else {
1265 #ifndef _LP64
1266         assert(dest->is_double_fpu(), "must be");
1267         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1268         __ fld_d(from_addr);
1269 #else
1270         ShouldNotReachHere();
1271 #endif // !LP64
1272       }
1273       break;
1274     }
1275 
1276     case T_INLINE_TYPE: // fall through
1277     case T_OBJECT:  // fall through
1278     case T_ARRAY:   // fall through
1279       if (UseCompressedOops && !wide) {
1280         __ movl(dest->as_register(), from_addr);
1281       } else {
1282         __ movptr(dest->as_register(), from_addr);
1283       }
1284       break;
1285 
1286     case T_ADDRESS:
1287       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1288         __ movl(dest->as_register(), from_addr);
1289       } else {
1290         __ movptr(dest->as_register(), from_addr);
1291       }
1292       break;
1293     case T_INT:
1294       __ movl(dest->as_register(), from_addr);
1295       break;
1296 

1643     add_debug_info_for_null_check_here(op->stub()->info());
1644     __ cmpb(Address(op->klass()->as_register(),
1645                     InstanceKlass::init_state_offset()),
1646                     InstanceKlass::fully_initialized);
1647     __ jcc(Assembler::notEqual, *op->stub()->entry());
1648   }
1649   __ allocate_object(op->obj()->as_register(),
1650                      op->tmp1()->as_register(),
1651                      op->tmp2()->as_register(),
1652                      op->header_size(),
1653                      op->object_size(),
1654                      op->klass()->as_register(),
1655                      *op->stub()->entry());
1656   __ bind(*op->stub()->continuation());
1657 }
1658 
1659 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1660   Register len =  op->len()->as_register();
1661   LP64_ONLY( __ movslq(len, len); )
1662 
1663   if (UseSlowPath || op->type() == T_INLINE_TYPE ||
1664       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1665       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1666     __ jmp(*op->stub()->entry());
1667   } else {
1668     Register tmp1 = op->tmp1()->as_register();
1669     Register tmp2 = op->tmp2()->as_register();
1670     Register tmp3 = op->tmp3()->as_register();
1671     if (len == tmp1) {
1672       tmp1 = tmp3;
1673     } else if (len == tmp2) {
1674       tmp2 = tmp3;
1675     } else if (len == tmp3) {
1676       // everything is ok
1677     } else {
1678       __ mov(tmp3, len);
1679     }
1680     __ allocate_array(op->obj()->as_register(),
1681                       len,
1682                       tmp1,
1683                       tmp2,

1742     assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1743   }
1744   Label profile_cast_success, profile_cast_failure;
1745   Label *success_target = op->should_profile() ? &profile_cast_success : success;
1746   Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1747 
1748   if (obj == k_RInfo) {
1749     k_RInfo = dst;
1750   } else if (obj == klass_RInfo) {
1751     klass_RInfo = dst;
1752   }
1753   if (k->is_loaded() && !UseCompressedClassPointers) {
1754     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1755   } else {
1756     Rtmp1 = op->tmp3()->as_register();
1757     select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1758   }
1759 
1760   assert_different_registers(obj, k_RInfo, klass_RInfo);
1761 
1762   if (op->need_null_check()) {
1763     __ cmpptr(obj, (int32_t)NULL_WORD);
1764     if (op->should_profile()) {
1765       Label not_null;
1766       __ jccb(Assembler::notEqual, not_null);
1767       // Object is null; update MDO and exit
1768       Register mdo  = klass_RInfo;
1769       __ mov_metadata(mdo, md->constant_encoding());
1770       Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
1771       int header_bits = BitData::null_seen_byte_constant();
1772       __ orb(data_addr, header_bits);
1773       __ jmp(*obj_is_null);
1774       __ bind(not_null);
1775     } else {
1776       __ jcc(Assembler::equal, *obj_is_null);
1777     }
1778   }
1779 
1780   if (!k->is_loaded()) {
1781     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1782   } else {
1783 #ifdef _LP64
1784     __ mov_metadata(k_RInfo, k->constant_encoding());
1785 #endif // _LP64
1786   }
1787   __ verify_oop(obj);
1788 
1789   if (op->fast_check()) {
1790     // get object class
1791     // not a safepoint as obj null check happens earlier
1792 #ifdef _LP64
1793     if (UseCompressedClassPointers) {
1794       __ load_klass(Rtmp1, obj, tmp_load_klass);
1795       __ cmpptr(k_RInfo, Rtmp1);
1796     } else {
1797       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));

1969         __ mov(dst, obj);
1970       }
1971     } else
1972       if (code == lir_instanceof) {
1973         Register obj = op->object()->as_register();
1974         Register dst = op->result_opr()->as_register();
1975         Label success, failure, done;
1976         emit_typecheck_helper(op, &success, &failure, &failure);
1977         __ bind(failure);
1978         __ xorptr(dst, dst);
1979         __ jmpb(done);
1980         __ bind(success);
1981         __ movptr(dst, 1);
1982         __ bind(done);
1983       } else {
1984         ShouldNotReachHere();
1985       }
1986 
1987 }
1988 
1989 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1990   // We are loading/storing from/to an array that *may* be flattened (the
1991   // declared type is Object[], abstract[], interface[] or VT.ref[]).
1992   // If this array is flattened, take the slow path.
1993   Register klass = op->tmp()->as_register();
1994   if (UseArrayMarkWordCheck) {
1995     __ test_flattened_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1996   } else {
1997     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
1998     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
1999     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
2000     __ testl(klass, Klass::_lh_array_tag_vt_value_bit_inplace);
2001     __ jcc(Assembler::notZero, *op->stub()->entry());
2002   }
2003   if (!op->value()->is_illegal()) {
2004     // The array is not flattened, but it might be null-free. If we are storing
2005     // a null into a null-free array, take the slow path (which will throw NPE).
2006     Label skip;
2007     __ cmpptr(op->value()->as_register(), (int32_t)NULL_WORD);
2008     __ jcc(Assembler::notEqual, skip);
2009     if (UseArrayMarkWordCheck) {
2010       __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
2011     } else {
2012       __ testl(klass, Klass::_lh_null_free_bit_inplace);
2013       __ jcc(Assembler::notZero, *op->stub()->entry());
2014     }
2015     __ bind(skip);
2016   }
2017 }
2018 
2019 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
2020   // We are storing into an array that *may* be null-free (the declared type is
2021   // Object[], abstract[], interface[] or VT.ref[]).
2022   if (UseArrayMarkWordCheck) {
2023     Label test_mark_word;
2024     Register tmp = op->tmp()->as_register();
2025     __ movptr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
2026     __ testl(tmp, markWord::unlocked_value);
2027     __ jccb(Assembler::notZero, test_mark_word);
2028     __ load_prototype_header(tmp, op->array()->as_register(), rscratch1);
2029     __ bind(test_mark_word);
2030     __ testl(tmp, markWord::null_free_array_bit_in_place);
2031   } else {
2032     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
2033     Register klass = op->tmp()->as_register();
2034     __ load_klass(klass, op->array()->as_register(), tmp_load_klass);
2035     __ movl(klass, Address(klass, Klass::layout_helper_offset()));
2036     __ testl(klass, Klass::_lh_null_free_bit_inplace);
2037   }
2038 }
2039 
2040 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
2041   Label L_oops_equal;
2042   Label L_oops_not_equal;
2043   Label L_end;
2044 
2045   Register left  = op->left()->as_register();
2046   Register right = op->right()->as_register();
2047 
2048   __ cmpptr(left, right);
2049   __ jcc(Assembler::equal, L_oops_equal);
2050 
2051   // (1) Null check -- if one of the operands is null, the other must not be null (because
2052   //     the two references are not equal), so they are not substitutable.
2053   //     FIXME: do the null check only if the operand is nullable.
2054   __ testptr(left, right);
2055   __ jcc(Assembler::zero, L_oops_not_equal);
2056 
2057   ciKlass* left_klass = op->left_klass();
2058   ciKlass* right_klass = op->right_klass();
2059 
2060   // (2) Inline type check -- if either of the operands is not an inline type,
2061   //     they are not substitutable. We do this only if we are not sure that the
2062   //     operands are inline types.
2063   if ((left_klass == NULL || right_klass == NULL) || // The klass is still unloaded, or came from a Phi node.
2064       !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
2065     Register tmp1  = op->tmp1()->as_register();
2066     __ movptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2067     __ andptr(tmp1, Address(left, oopDesc::mark_offset_in_bytes()));
2068     __ andptr(tmp1, Address(right, oopDesc::mark_offset_in_bytes()));
2069     __ cmpptr(tmp1, (intptr_t)markWord::inline_type_pattern);
2070     __ jcc(Assembler::notEqual, L_oops_not_equal);
2071   }
2072 
2073   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
2074   if (left_klass != NULL && left_klass->is_inlinetype() && left_klass == right_klass) {
2075     // No need to load klass -- the operands are statically known to be the same inline klass.
2076     __ jmp(*op->stub()->entry());
2077   } else {
2078     Register left_klass_op = op->left_klass_op()->as_register();
2079     Register right_klass_op = op->right_klass_op()->as_register();
2080 
2081     if (UseCompressedClassPointers) {
2082       __ movl(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2083       __ movl(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2084       __ cmpl(left_klass_op, right_klass_op);
2085     } else {
2086       __ movptr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
2087       __ movptr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
2088       __ cmpptr(left_klass_op, right_klass_op);
2089     }
2090 
2091     __ jcc(Assembler::equal, *op->stub()->entry()); // same klass -> do slow check
2092     // fall through to L_oops_not_equal
2093   }
2094 
2095   __ bind(L_oops_not_equal);
2096   move(op->not_equal_result(), op->result_opr());
2097   __ jmp(L_end);
2098 
2099   __ bind(L_oops_equal);
2100   move(op->equal_result(), op->result_opr());
2101   __ jmp(L_end);
2102 
2103   // We've returned from the stub. RAX contains 0x0 IFF the two
2104   // operands are not substitutable. (Don't compare against 0x1 in case the
2105   // C compiler is naughty)
2106   __ bind(*op->stub()->continuation());
2107   __ cmpl(rax, 0);
2108   __ jcc(Assembler::equal, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
2109   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
2110   // fall-through
2111   __ bind(L_end);
2112 }
2113 
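     // cmpxchg expects the compare value in rax and reports success via ZF; the
     // 32-bit cmpxchg8b form uses rdx:rax as the compare value and rcx:rbx as the
     // new value, which the asserts below verify.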
2114 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2115   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
2116     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
2117     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
2118     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
2119     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
2120     Register addr = op->addr()->as_register();
2121     __ lock();
2122     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
2123 
2124   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
2125     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
2126     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2127     Register newval = op->new_value()->as_register();
2128     Register cmpval = op->cmp_value()->as_register();
2129     assert(cmpval == rax, "wrong register");
2130     assert(newval != NULL, "new val must be register");
2131     assert(cmpval != newval, "cmp and new values must be in different registers");
2132     assert(cmpval != addr, "cmp and addr must be in different registers");

2153       __ cmpxchgl(newval, Address(addr, 0));
2154     }
2155 #ifdef _LP64
2156   } else if (op->code() == lir_cas_long) {
2157     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2158     Register newval = op->new_value()->as_register_lo();
2159     Register cmpval = op->cmp_value()->as_register_lo();
2160     assert(cmpval == rax, "wrong register");
2161     assert(newval != NULL, "new val must be register");
2162     assert(cmpval != newval, "cmp and new values must be in different registers");
2163     assert(cmpval != addr, "cmp and addr must be in different registers");
2164     assert(newval != addr, "new value and addr must be in different registers");
2165     __ lock();
2166     __ cmpxchgq(newval, Address(addr, 0));
2167 #endif // _LP64
2168   } else {
2169     Unimplemented();
2170   }
2171 }
2172 
2173 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
2174   assert(dst->is_cpu_register(), "must be");
2175   assert(dst->type() == src->type(), "must be");
2176 
2177   if (src->is_cpu_register()) {
2178     reg2reg(src, dst);
2179   } else if (src->is_stack()) {
2180     stack2reg(src, dst, dst->type());
2181   } else if (src->is_constant()) {
2182     const2reg(src, dst, lir_patch_none, NULL);
2183   } else {
2184     ShouldNotReachHere();
2185   }
2186 }
2187 
2188 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2189   Assembler::Condition acond, ncond;
2190   switch (condition) {
2191     case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
2192     case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
2193     case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
2194     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
2195     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
2196     case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
2197     case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
2198     case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
2199     default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
2200                                 ShouldNotReachHere();
2201   }
2202 
2203   if (opr1->is_cpu_register()) {
2204     reg2reg(opr1, result);
2205   } else if (opr1->is_stack()) {
2206     stack2reg(opr1, result, result->type());
2207   } else if (opr1->is_constant()) {

3032   int offset = __ offset();
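       // Compute where the 32-bit displacement of the upcoming call will land so
       // the align() below can pad it onto a word boundary, allowing the call
       // target to be patched atomically.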
3033   switch (code) {
3034   case lir_static_call:
3035   case lir_optvirtual_call:
3036   case lir_dynamic_call:
3037     offset += NativeCall::displacement_offset;
3038     break;
3039   case lir_icvirtual_call:
3040     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
3041     break;
3042   default: ShouldNotReachHere();
3043   }
3044   __ align(BytesPerWord, offset);
3045 }
3046 
3047 
3048 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
3049   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
3050          "must be aligned");
3051   __ call(AddressLiteral(op->addr(), rtype));
3052   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3053 }
3054 
3055 
3056 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
3057   __ ic_call(op->addr());
3058   add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
3059   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
3060          "must be aligned");
3061 }
3062 
3063 
3064 void LIR_Assembler::emit_static_call_stub() {
3065   address call_pc = __ pc();
3066   address stub = __ start_a_stub(call_stub_size());
3067   if (stub == NULL) {
3068     bailout("static call stub overflow");
3069     return;
3070   }
3071 
3072   int start = __ offset();
3073 
3074   // make sure that the displacement word of the call ends up word aligned
3075   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
3076   __ relocate(static_stub_Relocation::spec(call_pc));
3077   __ mov_metadata(rbx, (Metadata*)NULL);
3078   // must be set to -1 at code generation time

3218   __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
3219 }
3220 
3221 
3222 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
3223   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3224   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3225   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3226   __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
3227 }
3228 
3229 
3230 void LIR_Assembler::store_parameter(Metadata* m,  int offset_from_rsp_in_words) {
3231   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3232   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3233   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3234   __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3235 }
3236 
3237 
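     // Take the slow path for an arraycopy operand that involves inline types:
     // a flattened source array, or a null-free destination array (a store of
     // null into it would have to throw). Optionally null-checks obj first.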
3238 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
3239   if (null_check) {
3240     __ testptr(obj, obj);
3241     __ jcc(Assembler::zero, *slow_path->entry());
3242   }
3243   if (UseArrayMarkWordCheck) {
3244     if (is_dest) {
3245       __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
3246     } else {
3247       __ test_flattened_array_oop(obj, tmp, *slow_path->entry());
3248     }
3249   } else {
3250     Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3251     __ load_klass(tmp, obj, tmp_load_klass);
3252     __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
3253     if (is_dest) {
3254       // Take the slow path if it's a null_free destination array, in case the source array contains NULLs.
3255       __ testl(tmp, Klass::_lh_null_free_bit_inplace);
3256     } else {
3257       __ testl(tmp, Klass::_lh_array_tag_vt_value_bit_inplace);
3258     }
3259     __ jcc(Assembler::notZero, *slow_path->entry());
3260   }
3261 }
3262 
3263 
3264 // This code replaces a call to arraycopy; no exception may
3265 // be thrown in this code; exceptions must be thrown in the System.arraycopy
3266 // activation frame. We could save some checks if this were not the case.
3267 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3268   ciArrayKlass* default_type = op->expected_type();
3269   Register src = op->src()->as_register();
3270   Register dst = op->dst()->as_register();
3271   Register src_pos = op->src_pos()->as_register();
3272   Register dst_pos = op->dst_pos()->as_register();
3273   Register length  = op->length()->as_register();
3274   Register tmp = op->tmp()->as_register();
3275   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3276 
3277   CodeStub* stub = op->stub();
3278   int flags = op->flags();
3279   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3280   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3281 
3282   if (flags & LIR_OpArrayCopy::always_slow_path) {
3283     __ jmp(*stub->entry());
3284     __ bind(*stub->continuation());
3285     return;
3286   }
3287 
3288   // if we don't know anything, just go through the generic arraycopy
3289   if (default_type == NULL) {
3290     // save outgoing arguments on stack in case call to System.arraycopy is needed
3291     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3292     // for interpreter calling conventions. Now we have to do it in the new-style conventions.
3293     // For the moment, until C1 gets the new register allocator, I just force all the
3294     // args to the right place (except the register args) and then, on the back side,
3295     // reload the register args properly if we take the slow path. Yuck.
3296 
3297     // These are proper for the calling convention
3298     store_parameter(length, 2);
3299     store_parameter(dst_pos, 1);
3300     store_parameter(dst, 0);
3301 
3302     // these are just temporary placements until we need to reload
3303     store_parameter(src_pos, 3);
3304     store_parameter(src, 4);
3305     NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3306 
3307     address copyfunc_addr = StubRoutines::generic_arraycopy();

3361     __ mov(tmp, rax);
3362     __ xorl(tmp, -1);
3363 
3364     // Reload values from the stack so they are where the stub
3365     // expects them.
3366     __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
3367     __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
3368     __ movptr   (length,  Address(rsp, 2*BytesPerWord));
3369     __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
3370     __ movptr   (src,     Address(rsp, 4*BytesPerWord));
3371 
3372     __ subl(length, tmp);
3373     __ addl(src_pos, tmp);
3374     __ addl(dst_pos, tmp);
3375     __ jmp(*stub->entry());
3376 
3377     __ bind(*stub->continuation());
3378     return;
3379   }
3380 
3381   // Handle inline type arrays
3382   if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
3383     arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
3384   }
3385   if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
3386     arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
3387   }
3388 
3389   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
3390 
3391   int elem_size = type2aelembytes(basic_type);
3392   Address::ScaleFactor scale;
3393 
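       // Map the element size in bytes to an x86 addressing-mode scale factor.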
3394   switch (elem_size) {
3395     case 1 :
3396       scale = Address::times_1;
3397       break;
3398     case 2 :
3399       scale = Address::times_2;
3400       break;
3401     case 4 :
3402       scale = Address::times_4;
3403       break;
3404     case 8 :
3405       scale = Address::times_8;
3406       break;
3407     default:
3408       scale = Address::no_scale;

3948         __ jccb(Assembler::zero, next);
3949 #endif
3950         // first time here. Set profile type.
3951         __ movptr(mdo_addr, tmp);
3952       } else {
3953         assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
3954                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
3955 
3956         __ movptr(tmp, mdo_addr);
3957         __ testptr(tmp, TypeEntries::type_unknown);
3958         __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
3959 
3960         __ orptr(mdo_addr, TypeEntries::type_unknown);
3961       }
3962     }
3963 
3964     __ bind(next);
3965   }
3966 }
3967 
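     // Profiling support for inline types: set the given flag bit in the MDO cell
     // when the profiled value is a non-null inline type.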
3968 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
3969   Register obj = op->obj()->as_register();
3970   Register tmp = op->tmp()->as_pointer_register();
3971   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
3972   bool not_null = op->not_null();
3973   int flag = op->flag();
3974 
3975   Label not_inline_type;
3976   if (!not_null) {
3977     __ testptr(obj, obj);
3978     __ jccb(Assembler::zero, not_inline_type);
3979   }
3980 
3981   __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
3982 
3983   __ orb(mdo_addr, flag);
3984 
3985   __ bind(not_inline_type);
3986 }
3987 
3988 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3989   Unimplemented();
3990 }
3991 
3992 
3993 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3994   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3995 }
3996 
3997 
3998 void LIR_Assembler::align_backward_branch_target() {
3999   __ align(BytesPerWord);
4000 }
4001 
4002 
4003 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
4004   if (left->is_single_cpu()) {
4005     __ negl(left->as_register());
4006     move_regs(left->as_register(), dest->as_register());
4007 

4228 }
4229 
4230 void LIR_Assembler::membar_storeload() {
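       // StoreLoad is the only ordering that needs an explicit fence on x86 (TSO).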
4231   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4232 }
4233 
4234 void LIR_Assembler::on_spin_wait() {
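       // PAUSE hints to the processor that this is a spin-wait loop.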
4235   __ pause ();
4236 }
4237 
4238 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
4239   assert(result_reg->is_register(), "check");
4240 #ifdef _LP64
4241   // __ get_thread(result_reg->as_register_lo());
4242   __ mov(result_reg->as_register(), r15_thread);
4243 #else
4244   __ get_thread(result_reg->as_register());
4245 #endif // _LP64
4246 }
4247 
4248 void LIR_Assembler::check_orig_pc() {
4249   __ cmpptr(frame_map()->address_for_orig_pc_addr(), (int32_t)NULL_WORD);
4250 }
4251 
4252 void LIR_Assembler::peephole(LIR_List*) {
4253   // do nothing for now
4254 }
4255 
4256 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
4257   assert(data == dest, "xchg/xadd uses only 2 operands");
4258 
4259   if (data->type() == T_INT) {
4260     if (code == lir_xadd) {
4261       __ lock();
4262       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
4263     } else {
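           // xchg with a memory operand implicitly asserts the lock prefix.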
4264       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
4265     }
4266   } else if (data->is_oop()) {
4267     assert (code == lir_xchg, "xadd for oops");
4268     Register obj = data->as_register();
4269 #ifdef _LP64
4270     if (UseCompressedOops) {