src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
#ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#include "asm/assembler.inline.hpp"
+ #include "code/vmreg.hpp"
#include "metaprogramming/enableIf.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
+ class OopMap;
+
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.
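A typical argument-dependent macro in this class is the mov(Register, uint64_t) overload, which chooses the cheapest way to materialize the immediate. A minimal, hedged sketch (assuming the usual '#define __ masm->' binding used by HotSpot stub generators; not part of this change):

    __ mov(r0, 0xffffL);              // small immediate: a single movz suffices
    __ mov(r1, 0x0123456789abcdefL);  // arbitrary 64-bit constant: movz/movk chain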
// The implementation is only non-empty for the InterpreterMacroAssembler,
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);
! void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
! void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
+ void rt_call(address dest, Register tmp = rscratch1);
// Helper functions for statistics gathering.
// Unconditional atomic increment.
void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
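The extra Register parameter only lets a caller substitute its own scratch register for the default rscratch1; rt_call is a runtime-call helper with the same convention. A hedged usage sketch (register choice is illustrative, not taken from the patch):

    Label slow_path;
    // Poll at a compiled-method return site, no acquire ordering, using r10
    // as the temporary because rscratch1 is needed elsewhere at this point.
    __ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, true /* in_nmethod */, r10);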
// Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
// The pointer will be loaded into the thread register.
void get_thread(Register thread);
+ // support for argument shuffling
+ void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
+ void float_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
+ void long_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
+ void double_move(VMRegPair src, VMRegPair dst, Register tmp = rscratch1);
+ void object_move(
+ OopMap* map,
+ int oop_handle_offset,
+ int framesize_in_slots,
+ VMRegPair src,
+ VMRegPair dst,
+ bool is_receiver,
+ int* receiver_offset);
+
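These declarations mirror the per-argument moves a native-wrapper generator performs when shuffling values from Java calling-convention locations into C ABI locations; object_move additionally records the oop's stack location in the supplied OopMap. A hedged sketch of how they might be driven (sig_bt/in_regs/out_regs stand for the usual signature and calling-convention arrays and are assumptions, not names from this patch):

    for (int i = 0; i < total_in_args; i++) {
      switch (sig_bt[i]) {
        case T_BOOLEAN: case T_BYTE: case T_CHAR:
        case T_SHORT:   case T_INT:
          __ move32_64(in_regs[i], out_regs[i]);   // 32-bit value into a 64-bit slot
          break;
        case T_FLOAT:  __ float_move(in_regs[i], out_regs[i]);  break;
        case T_DOUBLE: __ double_move(in_regs[i], out_regs[i]); break;
        case T_LONG:   __ long_move(in_regs[i], out_regs[i]);   break;
        default: ShouldNotReachHere();
      }
    }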
// Support for VM calls
//
// It is imperative that all calls into the VM are handled via the call_VM macros.
// They make sure that the stack linkage is setup correctly. call_VM's correspond
Label* L_fast_path = NULL,
Label* L_slow_path = NULL);
Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
! void verify_sve_vector_length();
! void verify_sve_vector_length(Register tmp = rscratch1);
void reinitialize_ptrue() {
  if (UseSVE > 0) {
    sve_ptrue(ptrue, B);
  }
}
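Here too the change is only that the scratch register becomes selectable. An illustrative call site (not from this patch), assuming the usual pattern of re-checking SVE state after returning from a runtime call:

    __ verify_sve_vector_length(r10);  // pass r10 when rscratch1 must stay live
    __ reinitialize_ptrue();           // restore the all-true predicate (see body above)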