
src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp

 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.inline.hpp"
#include "code/vmreg.hpp"
#include "metaprogramming/enableIf.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"

class OopMap;

// MacroAssembler extends Assembler with frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_leaf_base(
      // ...
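  // Illustrative sketch, not upstream code: assuming the elided signature has
  // the usual (entry_point, number_of_arguments) shape, a subclass such as the
  // InterpreterMacroAssembler could customize the base routine along these
  // lines; the save/restore helpers are hypothetical stand-ins for whatever
  // extra state it preserves.
  //
  //   void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
  //                                                     int number_of_arguments) {
  //     save_interpreter_state();      // hypothetical: stash extra registers
  //     MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
  //     restore_interpreter_state();   // hypothetical: restore them afterwards
  //   }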
  enum KlassDecodeMode {
    KlassDecodeNone,
    KlassDecodeZero,
    KlassDecodeXor,
    KlassDecodeMovk
  };

  KlassDecodeMode klass_decode_mode();

 private:
  static KlassDecodeMode _klass_decode_mode;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
  void rt_call(address dest, Register tmp = rscratch1);
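  // Illustrative sketch (assumed usage, not from this header): a poll on the
  // method return path branches to a slow-path label when a safepoint is
  // pending; the label and surrounding code are hypothetical.
  //
  //   Label slow_path;
  //   safepoint_poll(slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */);
  //   ret(lr);
  //   bind(slow_path);
  //   // ... branch to the safepoint handler stub ...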

  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2, tmp3);
  }
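  // Illustrative sketch: bumping a 32-bit statistics counter through the
  // Address overload above; the counter variable and the choice of temporary
  // registers are hypothetical.
  //
  //   static volatile int _example_counter;   // hypothetical counter
  //   atomic_incw(ExternalAddress((address)&_example_counter),
  //               rscratch1, rscratch2, r10);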
  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }
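  // Note (an assumption, reusing the hypothetical counter above): because
  // lea() records a.rspec() as a relocation, it can also materialize
  // addresses that live outside the code being assembled, e.g.
  //
  //   lea(rscratch1, ExternalAddress((address)&_example_counter));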

  /* Sometimes we get misaligned loads and stores, usually from Unsafe
     accesses, and these can exceed the offset range. */
  Address legitimize_address(const Address &a, int size, Register scratch) {
    if (a.getMode() == Address::base_plus_offset) {
      if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
        // ...
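  // Illustrative sketch (assumed usage): when the immediate offset cannot be
  // encoded, the elided body presumably rewrites the address using 'scratch';
  // a caller emitting an 8-byte access could write (names hypothetical):
  //
  //   Address a = legitimize_address(Address(base, offset), 8 /* size */, rscratch2);
  //   ldr(dest, a);   // the returned address is guaranteed encodable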

  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter(bool strip_ret_addr = false);
  void leave();
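  // Illustrative sketch: the usual prologue/epilogue pairing in generated
  // code (the body in between is hypothetical):
  //
  //   enter();    // save rfp/lr and set up a new frame
  //   // ... generated body ...
  //   leave();    // tear the frame down again
  //   ret(lr);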

  // ROP Protection
  void protect_return_address();
  void protect_return_address(Register return_reg, Register temp_reg);
  void authenticate_return_address(Register return_reg = lr);
  void authenticate_return_address(Register return_reg, Register temp_reg);
  void strip_return_address();
  void check_return_address(Register return_reg = lr) PRODUCT_RETURN;
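  // Illustrative sketch (assumed pairing): with ROP protection enabled, a
  // return address signed on entry must be authenticated before it is used
  // again; the surrounding code is hypothetical.
  //
  //   protect_return_address();        // sign lr in the prologue
  //   // ... frame body ...
  //   authenticate_return_address();   // verify lr before returning
  //   ret(lr);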

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

  // Support for argument shuffling
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
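  // Illustrative sketch: a native wrapper shuffles each Java argument to its
  // C ABI home with the helpers above; the in_regs/out_regs arrays below are
  // hypothetical stand-ins for the two calling conventions.
  //
  //   VMRegPair src = in_regs[i];    // hypothetical: Java calling convention
  //   VMRegPair dst = out_regs[i];   // hypothetical: C calling convention
  //   move32_64(src, dst);           // move a jint, widening it to 64 bits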

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               // ...
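  // Illustrative sketch: a one-argument call into the runtime that leaves its
  // oop result in r0; SomeRuntime::entry is a hypothetical placeholder.
  //
  //   call_VM(r0,
  //           CAST_FROM_FN_PTR(address, SomeRuntime::entry),
  //           c_rarg1);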
  // ...
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
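  // Illustrative sketch (registers are hypothetical): the combined check
  // branches to L_ok on success; falling through means the check failed.
  //
  //   Label L_ok;
  //   check_klass_subtype(r10 /* sub_klass */, r11 /* super_klass */, rscratch1, L_ok);
  //   // fall-through: failure, e.g. branch to a throwing stub here
  //   bind(L_ok);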

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  void verify_sve_vector_length(Register tmp = rscratch1);
  void reinitialize_ptrue() {
    if (UseSVE > 0) {
      sve_ptrue(ptrue, B);
    }
  }
  void verify_ptrue();
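  // Note (an assumption): generated SVE code relies on the ptrue register
  // holding an all-true predicate, so the predicate is re-established after
  // anything that may clobber it, e.g. a runtime call (entry hypothetical):
  //
  //   rt_call(CAST_FROM_FN_PTR(address, SomeRuntime::entry));
  //   reinitialize_ptrue();   // restore the all-true predicate for SVE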

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address addr, const char* s, const char* file, int line) {
    if (VerifyOops) {
      // ...