
src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp


 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.inline.hpp"
#include "code/vmreg.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"

class OopMap;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_leaf_base(

    KlassDecodeNone,
    KlassDecodeZero,
    KlassDecodeXor,
    KlassDecodeMovk
  };

  KlassDecodeMode klass_decode_mode();

 private:
  static KlassDecodeMode _klass_decode_mode;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
  void rt_call(address dest, Register tmp = rscratch1);
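
  // Illustrative sketch (not part of this interface): a typical safepoint
  // poll on a method-return path, using the usual HotSpot "__" shorthand
  // for the MacroAssembler pointer:
  //
  //   Label slow_path;
  //   __ safepoint_poll(slow_path, true /* at_return */,
  //                     true /* acquire */, false /* in_nmethod */);
  //   __ ret(lr);
  //   __ bind(slow_path);
  //   // ... branch to the safepoint handler ...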

  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2, tmp3);
  }
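
  // Usage sketch (illustrative only): with a hypothetical register
  // "counter_addr" already holding the counter's address, bump the 32-bit
  // word it points at:
  //
  //   __ atomic_incw(counter_addr, rscratch1, rscratch2);
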
  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }
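
  // Sketch (illustrative; "base" and "index" are placeholder registers):
  // materialize a scaled base+index address into a register, e.g. to pass
  // it on to a helper:
  //
  //   __ lea(r0, Address(base, index, Address::lsl(3)));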

  /* Sometimes we get misaligned loads and stores, usually from Unsafe
     accesses, and these can exceed the offset range. */
  Address legitimize_address(const Address &a, int size, Register scratch) {
    if (a.getMode() == Address::base_plus_offset) {
      if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {


  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter()
  {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    mov(rfp, sp);
  }
  void leave()
  {
    mov(sp, rfp);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
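
  // Together these implement the standard AArch64 frame discipline: the
  // saved frame pointer and return address sit as a pair at the frame base.
  // Illustrative pairing in a generated stub ("__" is the usual shorthand):
  //
  //   __ enter();   // stp rfp, lr, [sp, #-16]!; mov rfp, sp
  //   // ... stub body ...
  //   __ leave();   // mov sp, rfp; ldp rfp, lr, [sp], #16
  //   __ ret(lr);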

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

  // Support for argument shuffling
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
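
  // These are used by native-wrapper/stub generation to shuffle Java
  // arguments into the C calling convention. Hedged sketch: "in_regs" and
  // "out_regs" are hypothetical VMRegPair arrays, as produced by the
  // calling-convention queries in real callers:
  //
  //   __ move32_64(in_regs[i], out_regs[i]);    // widen an int argument
  //   __ double_move(in_regs[j], out_regs[j]);  // move/spill a double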


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
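  //
  // Typical use (illustrative; the entry point named here is hypothetical):
  //
  //   __ call_VM(r0 /* oop_result */,
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
  //              c_rarg1);
  //   // On return, r0 holds the oop result and pending exceptions have
  //   // been checked (check_exceptions defaults to true).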


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,

                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
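
  // Sketch of the combined form (illustrative; register choices are
  // placeholders):
  //
  //   Label ok;
  //   __ check_klass_subtype(r1 /* sub */, r2 /* super */, r3 /* temp */, ok);
  //   // control falls through here on failure, e.g. to raise an error
  //   __ bind(ok);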

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);
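
  // Sketch (illustrative): take the fast path when the klass is already
  // initialized (or being initialized by this thread); otherwise fall into
  // the slow path, which typically calls into the runtime:
  //
  //   Label fast, slow;
  //   __ clinit_barrier(r0 /* klass */, rthread, &fast, &slow);
  //   __ bind(slow);
  //   // ... runtime call to complete class initialization ...
  //   __ bind(fast);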

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  void verify_sve_vector_length(Register tmp = rscratch1);
  void reinitialize_ptrue() {
    if (UseSVE > 0) {
      sve_ptrue(ptrue, B);
    }
  }
  void verify_ptrue();
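
  // Generated SVE code assumes the "ptrue" predicate register is all-true;
  // a call that may clobber predicate state should be followed by
  // reinitialize_ptrue(). Illustrative (the helper name is hypothetical):
  //
  //   __ rt_call(CAST_FROM_FN_PTR(address, some_runtime_helper));
  //   __ reinitialize_ptrue();  // no-op unless UseSVE > 0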

  // Debugging

  // Only used if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
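
  // The macro forms capture the register name plus file/line for the error
  // message. Illustrative expansion (currently a no-op, since the
  // _verify_*_ptr bodies above are empty):
  //
  //   __ verify_klass_ptr(r0);
  //   //   => _verify_klass_ptr(r0, "broken klass r0", __FILE__, __LINE__)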