1 /*
2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/vm_version.hpp"
34 #include "utilities/checkedCast.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
85 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
86 // The implementation is only non-empty for the InterpreterMacroAssembler,
87 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
88 virtual void check_and_handle_popframe(Register java_thread);
89 virtual void check_and_handle_earlyret(Register java_thread);
90
91 Address as_Address(AddressLiteral adr);
92 Address as_Address(ArrayAddress adr, Register rscratch);
93
94 // Support for null-checks
95 //
96 // Generates code that causes a null OS exception if the content of reg is null.
97 // If the accessed location is M[reg + offset] and the offset is known, provide the
98 // offset. No explicit code generation is needed if the offset is within a certain
99 // range (0 <= offset <= page_size).
100
101 void null_check(Register reg, int offset = -1);
102 static bool needs_explicit_null_check(intptr_t offset);
103 static bool uses_implicit_null_check(void* address);
104
105 // Required platform-specific helpers for Label::patch_instructions.
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  // Patch the branch instruction at 'branch' so that it transfers control to
  // 'target'. Handles rel8 (short jmp/jcc) and rel32 (call/jmp/jcc/xbegin)
  // encodings; asserts on any other opcode. 'file' and 'line' identify the
  // patch request for error reporting.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
        (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      // The rel8 displacement is relative to the address following the
      // 2-byte instruction, i.e. &disp[1].
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
346 void resolve_global_jobject(Register value, Register thread, Register tmp);
347
  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation
  // Load/store/test helpers for C++ 'bool' values.

  void movbool(Register dst, Address src);     // load a C++ bool from memory
  void movbool(Address dst, bool boolconst);   // store a constant C++ bool
  void movbool(Address dst, Register src);     // store a C++ bool from a register
  void testbool(Register dst);                 // set condition flags from a C++ bool
357
358 void resolve_oop_handle(Register result, Register tmp);
359 void resolve_weak_handle(Register result, Register tmp);
360 void load_mirror(Register mirror, Register method, Register tmp);
361 void load_method_holder_cld(Register rresult, Register rmethod);
362
363 void load_method_holder(Register holder, Register method);
364
365 // oop manipulations
366 void load_klass(Register dst, Register src, Register tmp);
367 void store_klass(Register dst, Register src, Register tmp);
368
369 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
370 Register tmp1, Register thread_tmp);
371 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
372 Register tmp1, Register tmp2, Register tmp3);
373
374 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
375 Register thread_tmp = noreg, DecoratorSet decorators = 0);
376 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
377 Register thread_tmp = noreg, DecoratorSet decorators = 0);
378 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
379 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
380
381 // Used for storing null. All other oop constants should be
382 // stored using routines that take a jobject.
383 void store_heap_oop_null(Address dst);
384
385 #ifdef _LP64
386 void store_klass_gap(Register dst, Register src);
387
388 // This dummy is to prevent a call to store_heap_oop from
389 // converting a zero (like null) into a Register by giving
390 // the compiler two choices it can't resolve
391
392 void store_heap_oop(Address dst, void* dummy);
393
394 void encode_heap_oop(Register r);
395 void decode_heap_oop(Register r);
396 void encode_heap_oop_not_null(Register r);
397 void decode_heap_oop_not_null(Register r);
398 void encode_heap_oop_not_null(Register dst, Register src);
399 void decode_heap_oop_not_null(Register dst, Register src);
400
401 void set_narrow_oop(Register dst, jobject obj);
402 void set_narrow_oop(Address dst, jobject obj);
403 void cmp_narrow_oop(Register dst, jobject obj);
404 void cmp_narrow_oop(Address dst, jobject obj);
566
567 public:
568 void push_set(RegSet set, int offset = -1);
569 void pop_set(RegSet set, int offset = -1);
570
571 // Push and pop everything that might be clobbered by a native
572 // runtime call.
573 // Only save the lower 64 bits of each vector register.
574 // Additional registers can be excluded in a passed RegSet.
575 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
576 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
577
  // Convenience forms of the *_except variants above, with an empty
  // exclusion set: save/restore the full call-clobbered register set.
  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }
584
585 // allocation
586 void tlab_allocate(
587 Register thread, // Current thread
588 Register obj, // result: pointer to object after successful allocation
589 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
590 int con_size_in_bytes, // object size in bytes if known at compile time
591 Register t1, // temp register
592 Register t2, // temp register
593 Label& slow_case // continuation point if fast allocation fails
594 );
595 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
596
597 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
598
599 // interface method calling
600 void lookup_interface_method(Register recv_klass,
601 Register intf_klass,
602 RegisterOrConstant itable_index,
603 Register method_result,
604 Register scan_temp,
605 Label& no_such_interface,
606 bool return_method = true);
607
608 void lookup_interface_method_stub(Register recv_klass,
609 Register holder_klass,
610 Register resolved_klass,
611 Register method_result,
612 Register scan_temp,
613 Register temp_reg2,
614 Register receiver,
615 int itable_index,
616 Label& L_no_such_interface);
  // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
831
832 // Arithmetics
833
834
835 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
836 void addptr(Address dst, Register src);
837
838 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
839 void addptr(Register dst, int32_t src);
840 void addptr(Register dst, Register src);
841 void addptr(Register dst, RegisterOrConstant src) {
842 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
843 else addptr(dst, src.as_register());
844 }
845
846 void andptr(Register dst, int32_t src);
847 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
848
849 #ifdef _LP64
850 using Assembler::andq;
851 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
852 #endif
853
854 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
855
856 // renamed to drag out the casting of address to int32_t/intptr_t
857 void cmp32(Register src1, int32_t imm);
858
859 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
860 // compare reg - mem, or reg - &mem
861 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
862
863 void cmp32(Register src1, Address src2);
864
865 #ifndef _LP64
866 void cmpklass(Address dst, Metadata* obj);
867 void cmpklass(Register dst, Metadata* obj);
2032 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2033
2034 using Assembler::movq;
2035 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2036
2037 // Can push value or effective address
2038 void pushptr(AddressLiteral src, Register rscratch);
2039
2040 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
2041 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
2042
2043 void pushoop(jobject obj, Register rscratch);
2044 void pushklass(Metadata* obj, Register rscratch);
2045
  // sign-extend an 'l' (32-bit) value to a pointer-sized element as needed
2047 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
2048 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
2049
2050
2051 public:
2052 // clear memory of size 'cnt' qwords, starting at 'base';
2053 // if 'is_large' is set, do not try to produce short loop
2054 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
2055
2056 // clear memory initialization sequence for constant size;
2057 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2058
2059 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2060 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2061
2062 // Fill primitive arrays
2063 void generate_fill(BasicType t, bool aligned,
2064 Register to, Register value, Register count,
2065 Register rtmp, XMMRegister xtmp);
2066
2067 void encode_iso_array(Register src, Register dst, Register len,
2068 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2069 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2070
2071 #ifdef _LP64
2072 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2073 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2074 Register y, Register y_idx, Register z,
|
1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/signature.hpp"
34 #include "runtime/vm_version.hpp"
35 #include "utilities/checkedCast.hpp"
36
37 class ciInlineKlass;
38
39 // MacroAssembler extends Assembler by frequently used macros.
40 //
41 // Instructions for which a 'better' code sequence exists depending
42 // on arguments should also go in here.
43
44 class MacroAssembler: public Assembler {
45 friend class LIR_Assembler;
46 friend class Runtime1; // as_Address()
47
48 public:
49 // Support for VM calls
50 //
51 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
52 // may customize this version by overriding it for its purposes (e.g., to save/restore
53 // additional registers when doing a VM call).
54
55 virtual void call_VM_leaf_base(
56 address entry_point, // the entry point
57 int number_of_arguments // the number of arguments to pop after the call
58 );
88 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
89 // The implementation is only non-empty for the InterpreterMacroAssembler,
90 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
91 virtual void check_and_handle_popframe(Register java_thread);
92 virtual void check_and_handle_earlyret(Register java_thread);
93
94 Address as_Address(AddressLiteral adr);
95 Address as_Address(ArrayAddress adr, Register rscratch);
96
97 // Support for null-checks
98 //
99 // Generates code that causes a null OS exception if the content of reg is null.
100 // If the accessed location is M[reg + offset] and the offset is known, provide the
101 // offset. No explicit code generation is needed if the offset is within a certain
102 // range (0 <= offset <= page_size).
103
104 void null_check(Register reg, int offset = -1);
105 static bool needs_explicit_null_check(intptr_t offset);
106 static bool uses_implicit_null_check(void* address);
107
108 // markWord tests, kills markWord reg
109 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
110
111 // inlineKlass queries, kills temp_reg
112 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
113 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
114 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
115
116 // Get the default value oop for the given InlineKlass
117 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
118 // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
119 // get_default_value_oop with extra assertion for empty inline klass
120 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);
121
122 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
123 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
124 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
125 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
126
127 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
128 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
129 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
130 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
131 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
132 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
133
134 // Check array klass layout helper for flat or null-free arrays...
135 void test_flat_array_layout(Register lh, Label& is_flat_array);
136 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
137
138 // Required platform-specific helpers for Label::patch_instructions.
139 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  // Patch the branch instruction at 'branch' so that it transfers control to
  // 'target'. Handles rel8 (short jmp/jcc) and rel32 (call/jmp/jcc/xbegin)
  // encodings; asserts on any other opcode. 'file' and 'line' identify the
  // patch request for error reporting.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
        (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      // The rel8 displacement is relative to the address following the
      // 2-byte instruction, i.e. &disp[1].
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
379 void resolve_global_jobject(Register value, Register thread, Register tmp);
380
381 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
382 void c2bool(Register x);
383
384 // C++ bool manipulation
385
386 void movbool(Register dst, Address src);
387 void movbool(Address dst, bool boolconst);
388 void movbool(Address dst, Register src);
389 void testbool(Register dst);
390
391 void resolve_oop_handle(Register result, Register tmp);
392 void resolve_weak_handle(Register result, Register tmp);
393 void load_mirror(Register mirror, Register method, Register tmp);
394 void load_method_holder_cld(Register rresult, Register rmethod);
395
396 void load_method_holder(Register holder, Register method);
397
398 // oop manipulations
399 void load_metadata(Register dst, Register src);
400 void load_klass(Register dst, Register src, Register tmp);
401 void store_klass(Register dst, Register src, Register tmp);
402
403 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
404 Register tmp1, Register thread_tmp);
405 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
406 Register tmp1, Register tmp2, Register tmp3);
407
408 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
409
410 // inline type data payload offsets...
411 void first_field_offset(Register inline_klass, Register offset);
412 void data_for_oop(Register oop, Register data, Register inline_klass);
  // get data payload ptr of a flat value array at index; kills rcx and index
414 void data_for_value_array_index(Register array, Register array_klass,
415 Register index, Register data);
416
417 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
418 Register thread_tmp = noreg, DecoratorSet decorators = 0);
419 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
420 Register thread_tmp = noreg, DecoratorSet decorators = 0);
421 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
422 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
423
424 // Used for storing null. All other oop constants should be
425 // stored using routines that take a jobject.
426 void store_heap_oop_null(Address dst);
427
428 void load_prototype_header(Register dst, Register src, Register tmp);
429
430 #ifdef _LP64
431 void store_klass_gap(Register dst, Register src);
432
433 // This dummy is to prevent a call to store_heap_oop from
434 // converting a zero (like null) into a Register by giving
435 // the compiler two choices it can't resolve
436
437 void store_heap_oop(Address dst, void* dummy);
438
439 void encode_heap_oop(Register r);
440 void decode_heap_oop(Register r);
441 void encode_heap_oop_not_null(Register r);
442 void decode_heap_oop_not_null(Register r);
443 void encode_heap_oop_not_null(Register dst, Register src);
444 void decode_heap_oop_not_null(Register dst, Register src);
445
446 void set_narrow_oop(Register dst, jobject obj);
447 void set_narrow_oop(Address dst, jobject obj);
448 void cmp_narrow_oop(Register dst, jobject obj);
449 void cmp_narrow_oop(Address dst, jobject obj);
611
612 public:
613 void push_set(RegSet set, int offset = -1);
614 void pop_set(RegSet set, int offset = -1);
615
616 // Push and pop everything that might be clobbered by a native
617 // runtime call.
618 // Only save the lower 64 bits of each vector register.
619 // Additional registers can be excluded in a passed RegSet.
620 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
621 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
622
  // Convenience forms of the *_except variants above, with an empty
  // exclusion set: save/restore the full call-clobbered register set.
  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }
629
630 // allocation
631
632 // Object / value buffer allocation...
633 // Allocate instance of klass, assumes klass initialized by caller
634 // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
636 void allocate_instance(Register klass, Register new_obj,
637 Register t1, Register t2,
638 bool clear_fields, Label& alloc_failed);
639
640 void tlab_allocate(
641 Register thread, // Current thread
642 Register obj, // result: pointer to object after successful allocation
643 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
644 int con_size_in_bytes, // object size in bytes if known at compile time
645 Register t1, // temp register
646 Register t2, // temp register
647 Label& slow_case // continuation point if fast allocation fails
648 );
649 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
650
651 // For field "index" within "klass", return inline_klass ...
652 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
653
654 void inline_layout_info(Register klass, Register index, Register layout_info);
655
656 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
657
658 // interface method calling
659 void lookup_interface_method(Register recv_klass,
660 Register intf_klass,
661 RegisterOrConstant itable_index,
662 Register method_result,
663 Register scan_temp,
664 Label& no_such_interface,
665 bool return_method = true);
666
667 void lookup_interface_method_stub(Register recv_klass,
668 Register holder_klass,
669 Register resolved_klass,
670 Register method_result,
671 Register scan_temp,
672 Register temp_reg2,
673 Register receiver,
674 int itable_index,
675 Label& L_no_such_interface);
  // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
890
891 // Arithmetics
892
893
894 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
895 void addptr(Address dst, Register src);
896
897 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
898 void addptr(Register dst, int32_t src);
899 void addptr(Register dst, Register src);
900 void addptr(Register dst, RegisterOrConstant src) {
901 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
902 else addptr(dst, src.as_register());
903 }
904
905 void andptr(Register dst, int32_t src);
906 void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
907 void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
908
909 #ifdef _LP64
910 using Assembler::andq;
911 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
912 #endif
913
914 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
915
916 // renamed to drag out the casting of address to int32_t/intptr_t
917 void cmp32(Register src1, int32_t imm);
918
919 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
920 // compare reg - mem, or reg - &mem
921 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
922
923 void cmp32(Register src1, Address src2);
924
925 #ifndef _LP64
926 void cmpklass(Address dst, Metadata* obj);
927 void cmpklass(Register dst, Metadata* obj);
2092 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2093
2094 using Assembler::movq;
2095 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2096
2097 // Can push value or effective address
2098 void pushptr(AddressLiteral src, Register rscratch);
2099
2100 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
2101 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
2102
2103 void pushoop(jobject obj, Register rscratch);
2104 void pushklass(Metadata* obj, Register rscratch);
2105
  // sign-extend an 'l' (32-bit) value to a pointer-sized element as needed
2107 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
2108 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
2109
2110
2111 public:
2112 // Inline type specific methods
2113 #include "asm/macroAssembler_common.hpp"
2114
2115 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
2116 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
2117 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
2118 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
2119 RegState reg_state[]);
2120 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
2121 VMRegPair* from, int from_count, int& from_index, VMReg to,
2122 RegState reg_state[], Register val_array);
2123 int extend_stack_for_inline_args(int args_on_stack);
2124 void remove_frame(int initial_framesize, bool needs_stack_repair);
2125 VMReg spill_reg_for(VMReg reg);
2126
2127 // clear memory of size 'cnt' qwords, starting at 'base';
2128 // if 'is_large' is set, do not try to produce short loop
2129 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
2130
2131 // clear memory initialization sequence for constant size;
2132 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2133
2134 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2135 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2136
2137 // Fill primitive arrays
2138 void generate_fill(BasicType t, bool aligned,
2139 Register to, Register value, Register count,
2140 Register rtmp, XMMRegister xtmp);
2141
2142 void encode_iso_array(Register src, Register dst, Register len,
2143 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2144 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2145
2146 #ifdef _LP64
2147 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2148 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2149 Register y, Register y_idx, Register z,
|