src/hotspot/cpu/x86/macroAssembler_x86.hpp
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
+ #include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
+ class ciInlineKlass;
+
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.
void null_check(Register reg, int offset = -1);
static bool needs_explicit_null_check(intptr_t offset);
static bool uses_implicit_null_check(void* address);
+ // markWord tests, kills markWord reg
+ void test_markword_is_inline_type(Register markword, Label& is_inline_type);
+
+ // inlineKlass queries, kills temp_reg
+ void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
+ void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
+ void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
+
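Usage sketch, not part of the patch: the caller supplies the value to test, a scratch register the helper may clobber, and a Label taken when the condition holds. Register choices (rax, rscratch1) and the usual `__` macro expanding to the MacroAssembler are assumptions here:
  Label is_inline, done;
  __ movptr(rscratch1, Address(rax, oopDesc::mark_offset_in_bytes()));  // rax assumed to hold the oop
  __ test_markword_is_inline_type(rscratch1, is_inline);                // markWord register is killed
  // ... not an inline type ...
  __ jmp(done);
  __ bind(is_inline);
  // ... inline-type path ...
  __ bind(done);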
+ // Get the default value oop for the given InlineKlass
+ void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
+ // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
+ // get_default_value_oop with extra assertion for empty inline klass
+ void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
+
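Call-shape sketch (illustrative only; rbx, rax and rscratch1 are assumptions, not prescribed by the patch):
  // rbx assumed to hold an InlineKlass*; rax receives its default instance
  __ get_default_value_oop(rbx, rscratch1, rax);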
+ void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
+ void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
+ void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
+ void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
+
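These take the already-loaded field flags plus a scratch register. A sketch, assuming rdx holds the resolved field's flags (registers and labels are illustrative):
  Label is_flat, done;
  __ test_field_is_flat(rdx, rscratch1, is_flat);
  // ... non-flat (reference) field path ...
  __ jmp(done);
  __ bind(is_flat);
  // ... flat field path ...
  __ bind(done);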
+ // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
+ void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
+ void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
+ void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
+ void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
+ void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
+
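Branch-shape sketch for the array predicates, assuming rax holds the array oop (registers and labels are illustrative, not from the patch):
  Label is_flat_array, done;
  __ test_flat_array_oop(rax, rscratch1, is_flat_array);
  // ... regular array path ...
  __ jmp(done);
  __ bind(is_flat_array);
  // ... flat array path ...
  __ bind(done);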
+ // Check array klass layout helper for flat or null-free arrays...
+ void test_flat_array_layout(Register lh, Label& is_flat_array);
+ void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
+
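Unlike the oop variants above, these expect the caller to have loaded the array klass' layout helper already; a sketch under that assumption (rbx and rdx are illustrative):
  __ movl(rdx, Address(rbx, in_bytes(Klass::layout_helper_offset())));  // rbx assumed to hold the array klass
  __ test_flat_array_layout(rdx, is_flat_array);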
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
void pd_patch_instruction(address branch, address target, const char* file, int line) {
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
void load_method_holder_cld(Register rresult, Register rmethod);
void load_method_holder(Register holder, Register method);
// oop manipulations
+ void load_metadata(Register dst, Register src);
void load_klass(Register dst, Register src, Register tmp);
void store_klass(Register dst, Register src, Register tmp);
void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
Register tmp1, Register thread_tmp);
void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
Register tmp1, Register tmp2, Register tmp3);
+ void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
+ void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
+ // We probably need the following for arrays: TODO FIXME
+ // void flat_element_copy(DecoratorSet decorators, Register src, Register dst, Register array);
+
+ // inline type data payload offsets...
+ void first_field_offset(Register inline_klass, Register offset);
+ void data_for_oop(Register oop, Register data, Register inline_klass);
+ // get data payload ptr for a flat value array at index, kills rcx and index
+ void data_for_value_array_index(Register array, Register array_klass,
+ Register index, Register data);
+
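Sketch of how the payload helpers compose, assuming rax holds a buffered inline-type oop and rbx its InlineKlass (register choices are illustrative):
  __ first_field_offset(rbx, rcx);   // rcx = offset of the first field inside the buffer
  __ data_for_oop(rax, rdi, rbx);    // rdi = address of rax's field payload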
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
Register thread_tmp = noreg, DecoratorSet decorators = 0);
void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
// Used for storing null. All other oop constants should be
// stored using routines that take a jobject.
void store_heap_oop_null(Address dst);
+ void load_prototype_header(Register dst, Register src, Register tmp);
+
#ifdef _LP64
void store_klass_gap(Register dst, Register src);
// This dummy is to prevent a call to store_heap_oop from
// converting a zero (like null) into a Register by giving
void pop_call_clobbered_registers(bool restore_fpu = true) {
pop_call_clobbered_registers_except(RegSet(), restore_fpu);
}
// allocation
+
+ // Object / value buffer allocation...
+ // Allocate instance of klass, assumes klass initialized by caller
+ // new_obj prefers to be rax
+ // Kills t1 and t2, preserves klass, returns the allocation in new_obj (rsi on LP64)
+ void allocate_instance(Register klass, Register new_obj,
+ Register t1, Register t2,
+ bool clear_fields, Label& alloc_failed);
+
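Call-shape sketch, assuming rbx holds an already-initialized InlineKlass* and the failure path falls back to the runtime (registers and labels are illustrative):
  Label slow_path, done;
  __ allocate_instance(rbx, rax, r13, r14, /*clear_fields*/ true, slow_path);
  __ jmp(done);
  __ bind(slow_path);
  // ... runtime allocation fallback ...
  __ bind(done);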
void tlab_allocate(
Register thread, // Current thread
Register obj, // result: pointer to object after successful allocation
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
int con_size_in_bytes, // object size in bytes if known at compile time
Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
);
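For a fixed-size allocation the variable-size register goes unused and can be passed as noreg; a sketch where instance_size_in_bytes is a hypothetical compile-time constant and r15_thread is the usual LP64 thread register:
  Label slow;
  __ tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes, rscratch1, slow);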
void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
+ // For field "index" within "klass", return inline_klass ...
+ void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
+
+ void inline_layout_info(Register klass, Register index, Register layout_info);
+
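Sketch of the intended use, assuming rbx holds the holder klass and rdx the field index (all register choices, and the exact contents of the outputs, are assumptions):
  __ inline_layout_info(rbx, rdx, rdi);           // rdi = layout info for field 'rdx' of klass 'rbx'
  __ get_inline_type_field_klass(rbx, rdx, rcx);  // rcx = that field's InlineKlass*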
void population_count(Register dst, Register src, Register scratch1, Register scratch2);
// interface method calling
void lookup_interface_method(Register recv_klass,
Register intf_klass,
if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
else addptr(dst, src.as_register());
}
void andptr(Register dst, int32_t src);
! void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
#ifdef _LP64
using Assembler::andq;
void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif
if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
else addptr(dst, src.as_register());
}
void andptr(Register dst, int32_t src);
! void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
+ void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
#ifdef _LP64
using Assembler::andq;
void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif
void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
public:
// clear memory of size 'cnt' qwords, starting at 'base';
// if 'is_large' is set, do not try to produce short loop
! void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
// clear memory initialization sequence for constant size;
void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
// clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
public:
+ // Inline type specific methods
+ #include "asm/macroAssembler_common.hpp"
+
+ int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
+ bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
+ bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
+ VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
+ RegState reg_state[]);
+ bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
+ VMRegPair* from, int from_count, int& from_index, VMReg to,
+ RegState reg_state[], Register val_array);
+ int extend_stack_for_inline_args(int args_on_stack);
+ void remove_frame(int initial_framesize, bool needs_stack_repair);
+ VMReg spill_reg_for(VMReg reg);
+
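These back the scalarized calling convention for inline-type arguments; a minimal sketch of the frame-related pair, with args_on_stack, initial_framesize and needs_stack_repair treated as hypothetical locals of the adapter generator:
  int sp_inc = __ extend_stack_for_inline_args(args_on_stack);  // grow the frame for scalarized args
  // ... marshal fields via pack_inline_helper / unpack_inline_helper ...
  __ remove_frame(initial_framesize, needs_stack_repair);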
// clear memory of size 'cnt' qwords, starting at 'base';
// if 'is_large' is set, do not try to produce short loop
! void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
// clear memory initialization sequence for constant size;
void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
// clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
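Sketch of the variable-count clear_mem form with the new word_copy_only flag (register choices are illustrative):
  __ clear_mem(rdi, rcx, rax, xmm0, /*is_large*/ false, /*word_copy_only*/ false);  // zero rcx qwords at rdi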