
src/hotspot/cpu/x86/macroAssembler_x86.hpp


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/rtmLocking.hpp"

  34 #include "runtime/vm_version.hpp"
  35 


  36 // MacroAssembler extends Assembler by frequently used macros.
  37 //
  38 // Instructions for which a 'better' code sequence exists depending
  39 // on arguments should also go in here.
  40 
  41 class MacroAssembler: public Assembler {
  42   friend class LIR_Assembler;
  43   friend class Runtime1;      // as_Address()
  44 
  45  public:
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).
  51 
  52   virtual void call_VM_leaf_base(
  53     address entry_point,               // the entry point
  54     int     number_of_arguments        // the number of arguments to pop after the call
  55   );
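
A rough sketch (not the actual HotSpot implementation) of how a two-argument call_VM_leaf variant might funnel into call_VM_leaf_base; pass_arg0/pass_arg1 stand in for whatever moves the arguments into the platform argument positions:

    void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
      pass_arg1(this, arg_1);             // second argument into place (assumed helper)
      pass_arg0(this, arg_0);             // first argument into place (assumed helper)
      call_VM_leaf_base(entry_point, 2);  // emit the call; pop 2 arguments afterwards where required
    }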

  85  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  86  // The implementation is only non-empty for the InterpreterMacroAssembler,
  87  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  88  virtual void check_and_handle_popframe(Register java_thread);
  89  virtual void check_and_handle_earlyret(Register java_thread);
  90 
  91   Address as_Address(AddressLiteral adr);
  92   Address as_Address(ArrayAddress adr, Register rscratch);
  93 
  94   // Support for NULL-checks
  95   //
  96   // Generates code that causes a NULL OS exception if the content of reg is NULL.
  97   // If the accessed location is M[reg + offset] and the offset is known, provide the
  98   // offset. No explicit code generation is needed if the offset is within a certain
  99   // range (0 <= offset <= page_size).
 100 
 101   void null_check(Register reg, int offset = -1);
 102   static bool needs_explicit_null_check(intptr_t offset);
 103   static bool uses_implicit_null_check(void* address);
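
A minimal usage sketch (emit_field_load is a hypothetical helper): a load at a small, known offset can rely on the implicit OS-trap null check, while an unknown or large offset needs the explicit form:

    void emit_field_load(MacroAssembler* masm, Register dst, Register obj, int field_offset) {
      if (MacroAssembler::needs_explicit_null_check(field_offset)) {
        masm->null_check(obj);                        // offset outside the protected range: emit an explicit test
      } else {
        masm->null_check(obj, field_offset);          // small known offset: no code is emitted
      }
      masm->movptr(dst, Address(obj, field_offset));  // the access itself takes the trap if obj is NULL
    }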
 104 

 105   // Required platform-specific helpers for Label::patch_instructions.
 106   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 107   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 108     unsigned char op = branch[0];
 109     assert(op == 0xE8 /* call */ ||
 110         op == 0xE9 /* jmp */ ||
 111         op == 0xEB /* short jmp */ ||
 112         (op & 0xF0) == 0x70 /* short jcc */ ||
 113         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 114         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 115         "Invalid opcode at patch point");
 116 
 117     if (op == 0xEB || (op & 0xF0) == 0x70) {
 118       // short offset operators (jmp and jcc)
 119       char* disp = (char*) &branch[1];
 120       int imm8 = target - (address) &disp[1];
 121       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 122                 file == NULL ? "<NULL>" : file, line);
 123       *disp = imm8;
 124     } else {

 330   void resolve_jobject(Register value, Register thread, Register tmp);
 331 
 332   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 333   void c2bool(Register x);
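
One way the documented "x == 0 ? 0 : 1" normalization can be realized on x86; a sketch only, the emitted sequence may differ:

    void normalize_c_bool(MacroAssembler* masm, Register x) {
      masm->andl(x, 0xFF);                 // only the low byte of a C 'boolean' is significant
      masm->setb(Assembler::notZero, x);   // x := (low byte != 0) ? 1 : 0
    }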
 334 
 335   // C++ bool manipulation
 336 
 337   void movbool(Register dst, Address src);
 338   void movbool(Address dst, bool boolconst);
 339   void movbool(Address dst, Register src);
 340   void testbool(Register dst);
 341 
 342   void resolve_oop_handle(Register result, Register tmp);
 343   void resolve_weak_handle(Register result, Register tmp);
 344   void load_mirror(Register mirror, Register method, Register tmp);
 345   void load_method_holder_cld(Register rresult, Register rmethod);
 346 
 347   void load_method_holder(Register holder, Register method);
 348 
 349   // oop manipulations

 350   void load_klass(Register dst, Register src, Register tmp);
 351   void store_klass(Register dst, Register src, Register tmp);
 352 
 353   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 354                       Register tmp1, Register thread_tmp);
 355   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 356                        Register tmp1, Register tmp2, Register tmp3);
 357 
 358   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 359                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 360   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 361                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 362   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 363                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 364 
 365   // Used for storing NULL. All other oop constants should be
 366   // stored using routines that take a jobject.
 367   void store_heap_oop_null(Address dst);
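
Usage sketch (the field offset is illustrative): NULL goes through the dedicated routine, while a value held in a register takes the barrier-aware store:

    void update_oop_field(MacroAssembler* masm, Register obj, Register new_val, Register tmp) {
      masm->store_heap_oop_null(Address(obj, 16));           // clear the field
      masm->store_heap_oop(Address(obj, 16), new_val, tmp);  // store an oop held in a register
    }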
 368 


 369 #ifdef _LP64
 370   void store_klass_gap(Register dst, Register src);
 371 
 372   // This dummy is to prevent a call to store_heap_oop from
 373   // converting a zero (like NULL) into a Register by giving
 374   // the compiler two choices it can't resolve
 375 
 376   void store_heap_oop(Address dst, void* dummy);
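
Illustration of the guard above (hypothetical call site): with the void* overload present, a literal zero no longer converts quietly to a Register; the mistaken spelling is rejected at compile time and the explicit routine must be used:

    void clear_oop_field(MacroAssembler* masm, Register obj) {
      // masm->store_heap_oop(Address(obj, 24), NULL);  // does not compile: Register vs. void* is ambiguous
      masm->store_heap_oop_null(Address(obj, 24));      // the supported way to store NULL
    }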
 377 
 378   void encode_heap_oop(Register r);
 379   void decode_heap_oop(Register r);
 380   void encode_heap_oop_not_null(Register r);
 381   void decode_heap_oop_not_null(Register r);
 382   void encode_heap_oop_not_null(Register dst, Register src);
 383   void decode_heap_oop_not_null(Register dst, Register src);
 384 
 385   void set_narrow_oop(Register dst, jobject obj);
 386   void set_narrow_oop(Address dst, jobject obj);
 387   void cmp_narrow_oop(Register dst, jobject obj);
 388   void cmp_narrow_oop(Address dst, jobject obj);

 550 
 551 public:
 552   void push_set(RegSet set, int offset = -1);
 553   void pop_set(RegSet set, int offset = -1);
 554 
 555   // Push and pop everything that might be clobbered by a native
 556   // runtime call.
 557   // Only save the lower 64 bits of each vector register.
 558   // Additional registers can be excluded in a passed RegSet.
 559   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 560   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 561 
 562   void push_call_clobbered_registers(bool save_fpu = true) {
 563     push_call_clobbered_registers_except(RegSet(), save_fpu);
 564   }
 565   void pop_call_clobbered_registers(bool restore_fpu = true) {
 566     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 567   }
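
Sketch of keeping a live result register out of the save/restore set around a native call; some_runtime_entry is a placeholder for a real C entry point:

    void call_runtime_keep_rax(MacroAssembler* masm, address some_runtime_entry) {
      masm->push_call_clobbered_registers_except(RegSet::of(rax));  // rax is not saved, so it survives untouched
      masm->call_VM_leaf(some_runtime_entry, 0);                    // leaf call with no arguments
      masm->pop_call_clobbered_registers_except(RegSet::of(rax));
    }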
 568 
 569   // allocation
 570   void tlab_allocate(
 571     Register thread,                   // Current thread
 572     Register obj,                      // result: pointer to object after successful allocation
 573     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 574     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 575     Register t1,                       // temp register
 576     Register t2,                       // temp register
 577     Label&   slow_case                 // continuation point if fast allocation fails
 578   );
 579   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
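
A sketch of a fixed-size TLAB fast path with a slow-path label; register choices and the 64-byte size are illustrative, and r15_thread assumes an LP64 build:

    void tlab_fast_alloc_64(MacroAssembler* masm, Label& slow_case) {
      masm->tlab_allocate(r15_thread,   // current thread (LP64)
                          rax,          // result: newly allocated object
                          noreg,        // size is not in a register...
                          64,           // ...it is the compile-time constant 64 bytes
                          rbx, rdx,     // temps
                          slow_case);   // jump here if the fast path fails
    }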
 580 



 581   // interface method calling
 582   void lookup_interface_method(Register recv_klass,
 583                                Register intf_klass,
 584                                RegisterOrConstant itable_index,
 585                                Register method_result,
 586                                Register scan_temp,
 587                                Label& no_such_interface,
 588                                bool return_method = true);
 589 
 590   // virtual method calling
 591   void lookup_virtual_method(Register recv_klass,
 592                              RegisterOrConstant vtable_index,
 593                              Register method_result);
 594 
 595   // Test sub_klass against super_klass, with fast and slow paths.
 596 
 597   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 598   // One of the three labels can be NULL, meaning take the fall-through.
 599   // If super_check_offset is -1, the value is loaded up from super_klass.
 600   // No registers are killed, except temp_reg.

 711   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
 712   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 713   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 714   // here in MacroAssembler. The major exception to this rule is call.
 715 
 716   // Arithmetics
 717 
 718 
 719   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 720   void addptr(Address dst, Register src);
 721 
 722   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 723   void addptr(Register dst, int32_t src);
 724   void addptr(Register dst, Register src);
 725   void addptr(Register dst, RegisterOrConstant src) {
 726     if (src.is_constant()) addptr(dst, src.as_constant());
 727     else                   addptr(dst, src.as_register());
 728   }
 729 
 730   void andptr(Register dst, int32_t src);
 731   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

 732 
 733   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 734 
 735   // renamed to drag out the casting of address to int32_t/intptr_t
 736   void cmp32(Register src1, int32_t imm);
 737 
 738   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 739   // compare reg - mem, or reg - &mem
 740   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 741 
 742   void cmp32(Register src1, Address src2);
 743 
 744 #ifndef _LP64
 745   void cmpklass(Address dst, Metadata* obj);
 746   void cmpklass(Register dst, Metadata* obj);
 747   void cmpoop(Address dst, jobject obj);
 748 #endif // _LP64
 749 
 750   void cmpoop(Register src1, Register src2);
 751   void cmpoop(Register src1, Address src2);

1792   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1793 
1794   using Assembler::movq;
1795   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1796 
1797   // Can push value or effective address
1798   void pushptr(AddressLiteral src, Register rscratch);
1799 
1800   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1801   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1802 
1803   void pushoop(jobject obj, Register rscratch);
1804   void pushklass(Metadata* obj, Register rscratch);
1805 
1806   // sign extend as needed: widen an 'l' (32-bit) value to a ptr-sized element
1807   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1808   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1809 
1810 
1811  public:
1812   // clear memory of size 'cnt' qwords, starting at 'base';
1813   // if 'is_large' is set, do not try to produce short loop
1814   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1815 
1816   // clear memory initialization sequence for constant size;
1817   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1818 
1819   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1820   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1821 
1822   // Fill primitive arrays
1823   void generate_fill(BasicType t, bool aligned,
1824                      Register to, Register value, Register count,
1825                      Register rtmp, XMMRegister xtmp);
1826 
1827   void encode_iso_array(Register src, Register dst, Register len,
1828                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1829                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1830 
1831 #ifdef _LP64
1832   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1833   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1834                              Register y, Register y_idx, Register z,

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/rtmLocking.hpp"
  34 #include "runtime/signature.hpp"
  35 #include "runtime/vm_version.hpp"
  36 
  37 class ciInlineKlass;
  38 
  39 // MacroAssembler extends Assembler by frequently used macros.
  40 //
  41 // Instructions for which a 'better' code sequence exists depending
  42 // on arguments should also go in here.
  43 
  44 class MacroAssembler: public Assembler {
  45   friend class LIR_Assembler;
  46   friend class Runtime1;      // as_Address()
  47 
  48  public:
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54 
  55   virtual void call_VM_leaf_base(
  56     address entry_point,               // the entry point
  57     int     number_of_arguments        // the number of arguments to pop after the call
  58   );

  88  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  89  // The implementation is only non-empty for the InterpreterMacroAssembler,
  90  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  91  virtual void check_and_handle_popframe(Register java_thread);
  92  virtual void check_and_handle_earlyret(Register java_thread);
  93 
  94   Address as_Address(AddressLiteral adr);
  95   Address as_Address(ArrayAddress adr, Register rscratch);
  96 
  97   // Support for NULL-checks
  98   //
  99   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 100   // If the accessed location is M[reg + offset] and the offset is known, provide the
 101   // offset. No explicit code generation is needed if the offset is within a certain
 102   // range (0 <= offset <= page_size).
 103 
 104   void null_check(Register reg, int offset = -1);
 105   static bool needs_explicit_null_check(intptr_t offset);
 106   static bool uses_implicit_null_check(void* address);
 107 
 108   // markWord tests, kills markWord reg
 109   void test_markword_is_inline_type(Register markword, Label& is_inline_type);
 110 
 111   // inlineKlass queries, kills temp_reg
 112   void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
 113   void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
 114   void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
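
Usage sketch of the inline-type testers, using only routines declared in this header; register choices are illustrative:

    void branch_if_inline_type(MacroAssembler* masm, Register obj, Register klass_tmp,
                               Register tmp, Label& take_inline_path) {
      masm->load_klass(klass_tmp, obj, tmp);                             // klass_tmp := obj->klass()
      masm->test_klass_is_inline_type(klass_tmp, tmp, take_inline_path); // jumps if obj is an inline type (kills tmp)
    }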
 115 
 116   // Get the default value oop for the given InlineKlass
 117   void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
 118   // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
 119   // get_default_value_oop with extra assertion for empty inline klass
 120   void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
 121 
 122   void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
 123   void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
 124   void test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined);
 125 
 126   // Check oops for special arrays, i.e. flattened and/or null-free
 127   void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
 128   void test_flattened_array_oop(Register oop, Register temp_reg, Label& is_flattened_array);
 129   void test_non_flattened_array_oop(Register oop, Register temp_reg, Label& is_non_flattened_array);
 130   void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
 131   void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
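
The typed array testers above presumably wrap test_oop_prototype_bit; a sketch with a placeholder bit value (not the real constant):

    void branch_if_flat_array(MacroAssembler* masm, Register oop, Register tmp, Label& is_flat) {
      const int32_t FLAT_ARRAY_BIT = 1 << 2;  // placeholder, not the actual prototype-header bit
      masm->test_oop_prototype_bit(oop, tmp, FLAT_ARRAY_BIT, /*jmp_set*/ true, is_flat);
    }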
 132 
 133   // Check array klass layout helper for flattened or null-free arrays...
 134   void test_flattened_array_layout(Register lh, Label& is_flattened_array);
 135   void test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array);
 136   void test_null_free_array_layout(Register lh, Label& is_null_free_array);
 137   void test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array);
 138 
 139   // Required platform-specific helpers for Label::patch_instructions.
 140   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 141   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 142     unsigned char op = branch[0];
 143     assert(op == 0xE8 /* call */ ||
 144         op == 0xE9 /* jmp */ ||
 145         op == 0xEB /* short jmp */ ||
 146         (op & 0xF0) == 0x70 /* short jcc */ ||
 147         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 148         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 149         "Invalid opcode at patch point");
 150 
 151     if (op == 0xEB || (op & 0xF0) == 0x70) {
 152       // short offset operators (jmp and jcc)
 153       char* disp = (char*) &branch[1];
 154       int imm8 = target - (address) &disp[1];
 155       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 156                 file == NULL ? "<NULL>" : file, line);
 157       *disp = imm8;
 158     } else {

 364   void resolve_jobject(Register value, Register thread, Register tmp);
 365 
 366   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 367   void c2bool(Register x);
 368 
 369   // C++ bool manipulation
 370 
 371   void movbool(Register dst, Address src);
 372   void movbool(Address dst, bool boolconst);
 373   void movbool(Address dst, Register src);
 374   void testbool(Register dst);
 375 
 376   void resolve_oop_handle(Register result, Register tmp);
 377   void resolve_weak_handle(Register result, Register tmp);
 378   void load_mirror(Register mirror, Register method, Register tmp);
 379   void load_method_holder_cld(Register rresult, Register rmethod);
 380 
 381   void load_method_holder(Register holder, Register method);
 382 
 383   // oop manipulations
 384   void load_metadata(Register dst, Register src);
 385   void load_klass(Register dst, Register src, Register tmp);
 386   void store_klass(Register dst, Register src, Register tmp);
 387 
 388   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 389                       Register tmp1, Register thread_tmp);
 390   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 391                        Register tmp1, Register tmp2, Register tmp3);
 392 
 393   void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
 394 
 395   // inline type data payload offsets...
 396   void first_field_offset(Register inline_klass, Register offset);
 397   void data_for_oop(Register oop, Register data, Register inline_klass);
 398   // get data payload ptr of a flat value array at index; kills rcx and index
 399   void data_for_value_array_index(Register array, Register array_klass,
 400                                   Register index, Register data);
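
Sketch: computing the payload pointer for element 'index' of a flat value array, using only routines declared here; per the comment above, rcx and index are clobbered:

    void element_payload_ptr(MacroAssembler* masm, Register array, Register array_klass,
                             Register index, Register data, Register tmp) {
      masm->load_klass(array_klass, array, tmp);                          // array_klass := array->klass()
      masm->data_for_value_array_index(array, array_klass, index, data);  // data := address of the element's payload
    }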
 401 
 402 
 403   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 404                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 405   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 406                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 407   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 408                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 409 
 410   // Used for storing NULL. All other oop constants should be
 411   // stored using routines that take a jobject.
 412   void store_heap_oop_null(Address dst);
 413 
 414   void load_prototype_header(Register dst, Register src, Register tmp);
 415 
 416 #ifdef _LP64
 417   void store_klass_gap(Register dst, Register src);
 418 
 419   // This dummy is to prevent a call to store_heap_oop from
 420   // converting a zero (like NULL) into a Register by giving
 421   // the compiler two choices it can't resolve
 422 
 423   void store_heap_oop(Address dst, void* dummy);
 424 
 425   void encode_heap_oop(Register r);
 426   void decode_heap_oop(Register r);
 427   void encode_heap_oop_not_null(Register r);
 428   void decode_heap_oop_not_null(Register r);
 429   void encode_heap_oop_not_null(Register dst, Register src);
 430   void decode_heap_oop_not_null(Register dst, Register src);
 431 
 432   void set_narrow_oop(Register dst, jobject obj);
 433   void set_narrow_oop(Address dst, jobject obj);
 434   void cmp_narrow_oop(Register dst, jobject obj);
 435   void cmp_narrow_oop(Address dst, jobject obj);

 597 
 598 public:
 599   void push_set(RegSet set, int offset = -1);
 600   void pop_set(RegSet set, int offset = -1);
 601 
 602   // Push and pop everything that might be clobbered by a native
 603   // runtime call.
 604   // Only save the lower 64 bits of each vector register.
 605   // Additional registers can be excluded in a passed RegSet.
 606   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 607   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 608 
 609   void push_call_clobbered_registers(bool save_fpu = true) {
 610     push_call_clobbered_registers_except(RegSet(), save_fpu);
 611   }
 612   void pop_call_clobbered_registers(bool restore_fpu = true) {
 613     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 614   }
 615 
 616   // allocation
 617 
 618   // Object / value buffer allocation...
 619   // Allocate instance of klass, assumes klass initialized by caller
 620   // new_obj prefers to be rax
 621   // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
 622   void allocate_instance(Register klass, Register new_obj,
 623                          Register t1, Register t2,
 624                          bool clear_fields, Label& alloc_failed);
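
Usage sketch for allocate_instance, following the register hints in the comment above (new_obj prefers rax; t1/t2 are killed):

    void fast_allocate_instance(MacroAssembler* masm, Register klass, Label& alloc_failed) {
      masm->allocate_instance(klass, rax,            // result in rax
                              rbx, rdx,              // temps, killed
                              /*clear_fields*/ true, // zero the instance body
                              alloc_failed);         // taken if the fast path cannot allocate
    }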
 625 
 626   void tlab_allocate(
 627     Register thread,                   // Current thread
 628     Register obj,                      // result: pointer to object after successful allocation
 629     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 630     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 631     Register t1,                       // temp register
 632     Register t2,                       // temp register
 633     Label&   slow_case                 // continuation point if fast allocation fails
 634   );
 635   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 636 
 637   // For field "index" within "klass", return inline_klass ...
 638   void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
 639 
 640   // interface method calling
 641   void lookup_interface_method(Register recv_klass,
 642                                Register intf_klass,
 643                                RegisterOrConstant itable_index,
 644                                Register method_result,
 645                                Register scan_temp,
 646                                Label& no_such_interface,
 647                                bool return_method = true);
 648 
 649   // virtual method calling
 650   void lookup_virtual_method(Register recv_klass,
 651                              RegisterOrConstant vtable_index,
 652                              Register method_result);
 653 
 654   // Test sub_klass against super_klass, with fast and slow paths.
 655 
 656   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 657   // One of the three labels can be NULL, meaning take the fall-through.
 658   // If super_check_offset is -1, the value is loaded up from super_klass.
 659   // No registers are killed, except temp_reg.

 770   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
 771   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 772   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 773   // here in MacroAssembler. The major exception to this rule is call.
 774 
 775   // Arithmetics
 776 
 777 
 778   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 779   void addptr(Address dst, Register src);
 780 
 781   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 782   void addptr(Register dst, int32_t src);
 783   void addptr(Register dst, Register src);
 784   void addptr(Register dst, RegisterOrConstant src) {
 785     if (src.is_constant()) addptr(dst, src.as_constant());
 786     else                   addptr(dst, src.as_register());
 787   }
 788 
 789   void andptr(Register dst, int32_t src);
 790   void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 791   void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 792 
 793   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 794 
 795   // renamed to drag out the casting of address to int32_t/intptr_t
 796   void cmp32(Register src1, int32_t imm);
 797 
 798   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 799   // compare reg - mem, or reg - &mem
 800   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 801 
 802   void cmp32(Register src1, Address src2);
 803 
 804 #ifndef _LP64
 805   void cmpklass(Address dst, Metadata* obj);
 806   void cmpklass(Register dst, Metadata* obj);
 807   void cmpoop(Address dst, jobject obj);
 808 #endif // _LP64
 809 
 810   void cmpoop(Register src1, Register src2);
 811   void cmpoop(Register src1, Address src2);

1852   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1853 
1854   using Assembler::movq;
1855   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1856 
1857   // Can push value or effective address
1858   void pushptr(AddressLiteral src, Register rscratch);
1859 
1860   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1861   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1862 
1863   void pushoop(jobject obj, Register rscratch);
1864   void pushklass(Metadata* obj, Register rscratch);
1865 
1866   // sign extend as needed: widen an 'l' (32-bit) value to a ptr-sized element
1867   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1868   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1869 
1870 
1871  public:
1872   // Inline type specific methods
1873   #include "asm/macroAssembler_common.hpp"
1874 
1875   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1876   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1877   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1878                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1879                             RegState reg_state[]);
1880   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1881                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1882                           RegState reg_state[], Register val_array);
1883   int extend_stack_for_inline_args(int args_on_stack);
1884   void remove_frame(int initial_framesize, bool needs_stack_repair);
1885   VMReg spill_reg_for(VMReg reg);
1886 
1887   // clear memory of size 'cnt' qwords, starting at 'base';
1888   // if 'is_large' is set, do not try to produce short loop
1889   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1890 
1891   // clear memory initialization sequence for constant size;
1892   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1893 
1894   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1895   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1896 
1897   // Fill primitive arrays
1898   void generate_fill(BasicType t, bool aligned,
1899                      Register to, Register value, Register count,
1900                      Register rtmp, XMMRegister xtmp);
1901 
1902   void encode_iso_array(Register src, Register dst, Register len,
1903                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1904                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1905 
1906 #ifdef _LP64
1907   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1908   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1909                              Register y, Register y_idx, Register z,