< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.hpp

Print this page

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/rtmLocking.hpp"

  34 #include "runtime/vm_version.hpp"
  35 


  36 // MacroAssembler extends Assembler by frequently used macros.
  37 //
  38 // Instructions for which a 'better' code sequence exists depending
  39 // on arguments should also go in here.
  40 
  41 class MacroAssembler: public Assembler {
  42   friend class LIR_Assembler;
  43   friend class Runtime1;      // as_Address()
  44 
  45  public:
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).
  51 
  52   virtual void call_VM_leaf_base(
  53     address entry_point,               // the entry point
  54     int     number_of_arguments        // the number of arguments to pop after the call
  55   );

  85  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  86  // The implementation is only non-empty for the InterpreterMacroAssembler,
  87  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  88  virtual void check_and_handle_popframe(Register java_thread);
  89  virtual void check_and_handle_earlyret(Register java_thread);
  90 
  91   Address as_Address(AddressLiteral adr);
  92   Address as_Address(ArrayAddress adr);
  93 
  94   // Support for NULL-checks
  95   //
  96   // Generates code that causes a NULL OS exception if the content of reg is NULL.
  97   // If the accessed location is M[reg + offset] and the offset is known, provide the
  98   // offset. No explicit code generation is needed if the offset is within a certain
  99   // range (0 <= offset <= page_size).
 100 
 101   void null_check(Register reg, int offset = -1);
 102   static bool needs_explicit_null_check(intptr_t offset);
 103   static bool uses_implicit_null_check(void* address);
 104 































 105   // Required platform-specific helpers for Label::patch_instructions.
 106   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 107   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 108     unsigned char op = branch[0];
 109     assert(op == 0xE8 /* call */ ||
 110         op == 0xE9 /* jmp */ ||
 111         op == 0xEB /* short jmp */ ||
 112         (op & 0xF0) == 0x70 /* short jcc */ ||
 113         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 114         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 115         "Invalid opcode at patch point");
 116 
 117     if (op == 0xEB || (op & 0xF0) == 0x70) {
 118       // short offset operators (jmp and jcc)
 119       char* disp = (char*) &branch[1];
 120       int imm8 = target - (address) &disp[1];
 121       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 122                 file == NULL ? "<NULL>" : file, line);
 123       *disp = imm8;
 124     } else {

 323   void resolve_jobject(Register value, Register thread, Register tmp);
 324 
 325   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 326   void c2bool(Register x);
 327 
 328   // C++ bool manipulation
 329 
 330   void movbool(Register dst, Address src);
 331   void movbool(Address dst, bool boolconst);
 332   void movbool(Address dst, Register src);
 333   void testbool(Register dst);
 334 
 335   void resolve_oop_handle(Register result, Register tmp = rscratch2);
 336   void resolve_weak_handle(Register result, Register tmp);
 337   void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
 338   void load_method_holder_cld(Register rresult, Register rmethod);
 339 
 340   void load_method_holder(Register holder, Register method);
 341 
 342   // oop manipulations

 343   void load_klass(Register dst, Register src, Register tmp);
 344   void store_klass(Register dst, Register src, Register tmp);
 345 
 346   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 347                       Register tmp1, Register thread_tmp);
 348   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 349                        Register tmp1, Register tmp2, Register tmp3);
 350 










 351   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 352                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 353   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 354                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 355   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 356                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 357 
 358   // Used for storing NULL. All other oop constants should be
 359   // stored using routines that take a jobject.
 360   void store_heap_oop_null(Address dst);
 361 


 362 #ifdef _LP64
 363   void store_klass_gap(Register dst, Register src);
 364 
 365   // This dummy is to prevent a call to store_heap_oop from
 366   // converting a zero (like NULL) into a Register by giving
 367   // the compiler two choices it can't resolve
 368 
 369   void store_heap_oop(Address dst, void* dummy);
 370 
 371   void encode_heap_oop(Register r);
 372   void decode_heap_oop(Register r);
 373   void encode_heap_oop_not_null(Register r);
 374   void decode_heap_oop_not_null(Register r);
 375   void encode_heap_oop_not_null(Register dst, Register src);
 376   void decode_heap_oop_not_null(Register dst, Register src);
 377 
 378   void set_narrow_oop(Register dst, jobject obj);
 379   void set_narrow_oop(Address dst, jobject obj);
 380   void cmp_narrow_oop(Register dst, jobject obj);
 381   void cmp_narrow_oop(Address dst, jobject obj);

 535 
 536 public:
 537   void push_set(RegSet set, int offset = -1);
 538   void pop_set(RegSet set, int offset = -1);
 539 
 540   // Push and pop everything that might be clobbered by a native
 541   // runtime call.
 542   // Only save the lower 64 bits of each vector register.
  543   // Additional registers can be excluded in a passed RegSet.
 544   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 545   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 546 
  547   void push_call_clobbered_registers(bool save_fpu = true) {  // convenience wrapper: no exclusions
  548     push_call_clobbered_registers_except(RegSet(), save_fpu);  // empty RegSet => save every call-clobbered register
  549   }
  550   void pop_call_clobbered_registers(bool restore_fpu = true) {  // convenience wrapper: no exclusions
  551     pop_call_clobbered_registers_except(RegSet(), restore_fpu);  // must mirror the matching push above
  552   }
 553 
 554   // allocation









 555   void eden_allocate(
 556     Register thread,                   // Current thread
 557     Register obj,                      // result: pointer to object after successful allocation
 558     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 559     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 560     Register t1,                       // temp register
 561     Label&   slow_case                 // continuation point if fast allocation fails
 562   );
 563   void tlab_allocate(
 564     Register thread,                   // Current thread
 565     Register obj,                      // result: pointer to object after successful allocation
 566     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 567     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 568     Register t1,                       // temp register
 569     Register t2,                       // temp register
 570     Label&   slow_case                 // continuation point if fast allocation fails
 571   );
 572   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 573 



 574   // interface method calling
 575   void lookup_interface_method(Register recv_klass,
 576                                Register intf_klass,
 577                                RegisterOrConstant itable_index,
 578                                Register method_result,
 579                                Register scan_temp,
 580                                Label& no_such_interface,
 581                                bool return_method = true);
 582 
 583   // virtual method calling
 584   void lookup_virtual_method(Register recv_klass,
 585                              RegisterOrConstant vtable_index,
 586                              Register method_result);
 587 
 588   // Test sub_klass against super_klass, with fast and slow paths.
 589 
 590   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 591   // One of the three labels can be NULL, meaning take the fall-through.
 592   // If super_check_offset is -1, the value is loaded up from super_klass.
 593   // No registers are killed, except temp_reg.

 704   // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
 705   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 706   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 707   // here in MacroAssembler. The major exception to this rule is call
 708 
 709   // Arithmetics
 710 
 711 
 712   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 713   void addptr(Address dst, Register src);
 714 
 715   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 716   void addptr(Register dst, int32_t src);
 717   void addptr(Register dst, Register src);
  718   void addptr(Register dst, RegisterOrConstant src) {  // dispatch on the operand's runtime kind
  719     if (src.is_constant()) addptr(dst, (int) src.as_constant());  // immediate form
  720     else                   addptr(dst,       src.as_register());  // register-register form
  721   }
 722 
 723   void andptr(Register dst, int32_t src);
 724   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

 725 
 726   void cmp8(AddressLiteral src1, int imm);
 727 
 728   // renamed to drag out the casting of address to int32_t/intptr_t
 729   void cmp32(Register src1, int32_t imm);
 730 
 731   void cmp32(AddressLiteral src1, int32_t imm);
 732   // compare reg - mem, or reg - &mem
 733   void cmp32(Register src1, AddressLiteral src2);
 734 
 735   void cmp32(Register src1, Address src2);
 736 
 737 #ifndef _LP64
 738   void cmpklass(Address dst, Metadata* obj);
 739   void cmpklass(Register dst, Metadata* obj);
 740   void cmpoop(Address dst, jobject obj);
 741 #endif // _LP64
 742 
 743   void cmpoop(Register src1, Register src2);
 744   void cmpoop(Register src1, Address src2);

1826   using Assembler::movq;
1827   void movdl(XMMRegister dst, AddressLiteral src);
1828   void movq(XMMRegister dst, AddressLiteral src);
1829 
1830   // Can push value or effective address
1831   void pushptr(AddressLiteral src);
1832 
1833   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1834   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1835 
1836   void pushoop(jobject obj);
1837   void pushklass(Metadata* obj);
1838 
1839   // sign-extend as needed: a 32-bit ("l") value to a pointer-sized element
1840   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1841   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1842 
1843 
1844  public:
1845   // C2 compiled method's prolog code.
1846   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);















1847 
1848   // clear memory of size 'cnt' qwords, starting at 'base';
1849   // if 'is_large' is set, do not try to produce short loop
1850   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1851 
1852   // clear memory initialization sequence for constant size;
1853   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1854 
1855   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1856   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1857 
1858   // Fill primitive arrays
1859   void generate_fill(BasicType t, bool aligned,
1860                      Register to, Register value, Register count,
1861                      Register rtmp, XMMRegister xtmp);
1862 
1863   void encode_iso_array(Register src, Register dst, Register len,
1864                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1865                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1866 
1867 #ifdef _LP64
1868   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1869   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1870                              Register y, Register y_idx, Register z,

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/rtmLocking.hpp"
  34 #include "runtime/signature.hpp"
  35 #include "runtime/vm_version.hpp"
  36 
  37 class ciInlineKlass;
  38 
  39 // MacroAssembler extends Assembler by frequently used macros.
  40 //
  41 // Instructions for which a 'better' code sequence exists depending
  42 // on arguments should also go in here.
  43 
  44 class MacroAssembler: public Assembler {
  45   friend class LIR_Assembler;
  46   friend class Runtime1;      // as_Address()
  47 
  48  public:
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54 
  55   virtual void call_VM_leaf_base(
  56     address entry_point,               // the entry point
  57     int     number_of_arguments        // the number of arguments to pop after the call
  58   );

  88  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  89  // The implementation is only non-empty for the InterpreterMacroAssembler,
  90  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  91  virtual void check_and_handle_popframe(Register java_thread);
  92  virtual void check_and_handle_earlyret(Register java_thread);
  93 
  94   Address as_Address(AddressLiteral adr);
  95   Address as_Address(ArrayAddress adr);
  96 
  97   // Support for NULL-checks
  98   //
  99   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 100   // If the accessed location is M[reg + offset] and the offset is known, provide the
 101   // offset. No explicit code generation is needed if the offset is within a certain
 102   // range (0 <= offset <= page_size).
 103 
 104   void null_check(Register reg, int offset = -1);
 105   static bool needs_explicit_null_check(intptr_t offset);
 106   static bool uses_implicit_null_check(void* address);
 107 
 108   // markWord tests, kills markWord reg
 109   void test_markword_is_inline_type(Register markword, Label& is_inline_type);
 110 
 111   // inlineKlass queries, kills temp_reg
 112   void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
 113   void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
 114   void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
 115 
 116   // Get the default value oop for the given InlineKlass
 117   void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
 118   // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
 119   // get_default_value_oop with extra assertion for empty inline klass
 120   void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
 121 
 122   void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
 123   void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
 124   void test_field_is_inlined(Register flags, Register temp_reg, Label& is_inlined);
 125 
 126   // Check oops for special arrays, i.e. flattened and/or null-free
 127   void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
 128   void test_flattened_array_oop(Register oop, Register temp_reg, Label&is_flattened_array);
 129   void test_non_flattened_array_oop(Register oop, Register temp_reg, Label&is_non_flattened_array);
 130   void test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array);
 131   void test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array);
 132 
  133   // Check array klass layout helper for flattened or null-free arrays...
 134   void test_flattened_array_layout(Register lh, Label& is_flattened_array);
 135   void test_non_flattened_array_layout(Register lh, Label& is_non_flattened_array);
 136   void test_null_free_array_layout(Register lh, Label& is_null_free_array);
 137   void test_non_null_free_array_layout(Register lh, Label& is_non_null_free_array);
 138 
 139   // Required platform-specific helpers for Label::patch_instructions.
 140   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 141   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 142     unsigned char op = branch[0];
 143     assert(op == 0xE8 /* call */ ||
 144         op == 0xE9 /* jmp */ ||
 145         op == 0xEB /* short jmp */ ||
 146         (op & 0xF0) == 0x70 /* short jcc */ ||
 147         op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
 148         op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
 149         "Invalid opcode at patch point");
 150 
 151     if (op == 0xEB || (op & 0xF0) == 0x70) {
 152       // short offset operators (jmp and jcc)
 153       char* disp = (char*) &branch[1];
 154       int imm8 = target - (address) &disp[1];
 155       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 156                 file == NULL ? "<NULL>" : file, line);
 157       *disp = imm8;
 158     } else {

 357   void resolve_jobject(Register value, Register thread, Register tmp);
 358 
 359   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 360   void c2bool(Register x);
 361 
 362   // C++ bool manipulation
 363 
 364   void movbool(Register dst, Address src);
 365   void movbool(Address dst, bool boolconst);
 366   void movbool(Address dst, Register src);
 367   void testbool(Register dst);
 368 
 369   void resolve_oop_handle(Register result, Register tmp = rscratch2);
 370   void resolve_weak_handle(Register result, Register tmp);
 371   void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
 372   void load_method_holder_cld(Register rresult, Register rmethod);
 373 
 374   void load_method_holder(Register holder, Register method);
 375 
 376   // oop manipulations
 377   void load_metadata(Register dst, Register src);
 378   void load_klass(Register dst, Register src, Register tmp);
 379   void store_klass(Register dst, Register src, Register tmp);
 380 
 381   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 382                       Register tmp1, Register thread_tmp);
 383   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
 384                        Register tmp1, Register tmp2, Register tmp3);
 385 
 386   void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
 387 
 388   // inline type data payload offsets...
 389   void first_field_offset(Register inline_klass, Register offset);
 390   void data_for_oop(Register oop, Register data, Register inline_klass);
  391   // get data payload ptr of a flat value array at index, kills rcx and index
 392   void data_for_value_array_index(Register array, Register array_klass,
 393                                   Register index, Register data);
 394 
 395 
 396   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 397                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 398   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 399                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 400   void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
 401                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 402 
 403   // Used for storing NULL. All other oop constants should be
 404   // stored using routines that take a jobject.
 405   void store_heap_oop_null(Address dst);
 406 
 407   void load_prototype_header(Register dst, Register src, Register tmp);
 408 
 409 #ifdef _LP64
 410   void store_klass_gap(Register dst, Register src);
 411 
 412   // This dummy is to prevent a call to store_heap_oop from
 413   // converting a zero (like NULL) into a Register by giving
 414   // the compiler two choices it can't resolve
 415 
 416   void store_heap_oop(Address dst, void* dummy);
 417 
 418   void encode_heap_oop(Register r);
 419   void decode_heap_oop(Register r);
 420   void encode_heap_oop_not_null(Register r);
 421   void decode_heap_oop_not_null(Register r);
 422   void encode_heap_oop_not_null(Register dst, Register src);
 423   void decode_heap_oop_not_null(Register dst, Register src);
 424 
 425   void set_narrow_oop(Register dst, jobject obj);
 426   void set_narrow_oop(Address dst, jobject obj);
 427   void cmp_narrow_oop(Register dst, jobject obj);
 428   void cmp_narrow_oop(Address dst, jobject obj);

 582 
 583 public:
 584   void push_set(RegSet set, int offset = -1);
 585   void pop_set(RegSet set, int offset = -1);
 586 
 587   // Push and pop everything that might be clobbered by a native
 588   // runtime call.
 589   // Only save the lower 64 bits of each vector register.
  590   // Additional registers can be excluded in a passed RegSet.
 591   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 592   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 593 
  594   void push_call_clobbered_registers(bool save_fpu = true) {  // convenience wrapper: no exclusions
  595     push_call_clobbered_registers_except(RegSet(), save_fpu);  // empty RegSet => save every call-clobbered register
  596   }
  597   void pop_call_clobbered_registers(bool restore_fpu = true) {  // convenience wrapper: no exclusions
  598     pop_call_clobbered_registers_except(RegSet(), restore_fpu);  // must mirror the matching push above
  599   }
 600 
 601   // allocation
 602 
 603   // Object / value buffer allocation...
 604   // Allocate instance of klass, assumes klass initialized by caller
 605   // new_obj prefers to be rax
  606   // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
 607   void allocate_instance(Register klass, Register new_obj,
 608                          Register t1, Register t2,
 609                          bool clear_fields, Label& alloc_failed);
 610 
 611   void eden_allocate(
 612     Register thread,                   // Current thread
 613     Register obj,                      // result: pointer to object after successful allocation
 614     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 615     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 616     Register t1,                       // temp register
 617     Label&   slow_case                 // continuation point if fast allocation fails
 618   );
 619   void tlab_allocate(
 620     Register thread,                   // Current thread
 621     Register obj,                      // result: pointer to object after successful allocation
 622     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 623     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 624     Register t1,                       // temp register
 625     Register t2,                       // temp register
 626     Label&   slow_case                 // continuation point if fast allocation fails
 627   );
 628   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 629 
 630   // For field "index" within "klass", return inline_klass ...
 631   void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
 632 
 633   // interface method calling
 634   void lookup_interface_method(Register recv_klass,
 635                                Register intf_klass,
 636                                RegisterOrConstant itable_index,
 637                                Register method_result,
 638                                Register scan_temp,
 639                                Label& no_such_interface,
 640                                bool return_method = true);
 641 
 642   // virtual method calling
 643   void lookup_virtual_method(Register recv_klass,
 644                              RegisterOrConstant vtable_index,
 645                              Register method_result);
 646 
 647   // Test sub_klass against super_klass, with fast and slow paths.
 648 
 649   // The fast path produces a tri-state answer: yes / no / maybe-slow.
 650   // One of the three labels can be NULL, meaning take the fall-through.
 651   // If super_check_offset is -1, the value is loaded up from super_klass.
 652   // No registers are killed, except temp_reg.

 763   // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
 764   // operands. In general the names are modified to avoid hiding the instruction in Assembler
 765   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 766   // here in MacroAssembler. The major exception to this rule is call
 767 
 768   // Arithmetics
 769 
 770 
 771   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 772   void addptr(Address dst, Register src);
 773 
 774   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 775   void addptr(Register dst, int32_t src);
 776   void addptr(Register dst, Register src);
  777   void addptr(Register dst, RegisterOrConstant src) {  // dispatch on the operand's runtime kind
  778     if (src.is_constant()) addptr(dst, (int) src.as_constant());  // immediate form
  779     else                   addptr(dst,       src.as_register());  // register-register form
  780   }
 781 
 782   void andptr(Register dst, int32_t src);
 783   void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 784   void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 785 
 786   void cmp8(AddressLiteral src1, int imm);
 787 
 788   // renamed to drag out the casting of address to int32_t/intptr_t
 789   void cmp32(Register src1, int32_t imm);
 790 
 791   void cmp32(AddressLiteral src1, int32_t imm);
 792   // compare reg - mem, or reg - &mem
 793   void cmp32(Register src1, AddressLiteral src2);
 794 
 795   void cmp32(Register src1, Address src2);
 796 
 797 #ifndef _LP64
 798   void cmpklass(Address dst, Metadata* obj);
 799   void cmpklass(Register dst, Metadata* obj);
 800   void cmpoop(Address dst, jobject obj);
 801 #endif // _LP64
 802 
 803   void cmpoop(Register src1, Register src2);
 804   void cmpoop(Register src1, Address src2);

1886   using Assembler::movq;
1887   void movdl(XMMRegister dst, AddressLiteral src);
1888   void movq(XMMRegister dst, AddressLiteral src);
1889 
1890   // Can push value or effective address
1891   void pushptr(AddressLiteral src);
1892 
1893   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1894   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1895 
1896   void pushoop(jobject obj);
1897   void pushklass(Metadata* obj);
1898 
1899   // sign-extend as needed: a 32-bit ("l") value to a pointer-sized element
1900   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1901   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1902 
1903 
1904  public:
1905   // C2 compiled method's prolog code.
1906   void verified_entry(Compile* C, int sp_inc = 0);
1907 
1908   // Inline type specific methods
1909   #include "asm/macroAssembler_common.hpp"
1910 
1911   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1912   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1913   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1914                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1915                             RegState reg_state[]);
1916   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1917                           VMRegPair* from, int from_count, int& from_index, VMReg to,
1918                           RegState reg_state[], Register val_array);
1919   int extend_stack_for_inline_args(int args_on_stack);
1920   void remove_frame(int initial_framesize, bool needs_stack_repair);
1921   VMReg spill_reg_for(VMReg reg);
1922 
1923   // clear memory of size 'cnt' qwords, starting at 'base';
1924   // if 'is_large' is set, do not try to produce short loop
1925   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1926 
1927   // clear memory initialization sequence for constant size;
1928   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1929 
1930   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1931   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1932 
1933   // Fill primitive arrays
1934   void generate_fill(BasicType t, bool aligned,
1935                      Register to, Register value, Register count,
1936                      Register rtmp, XMMRegister xtmp);
1937 
1938   void encode_iso_array(Register src, Register dst, Register len,
1939                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1940                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1941 
1942 #ifdef _LP64
1943   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1944   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1945                              Register y, Register y_idx, Register z,
< prev index next >