1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/signature.hpp"
34 #include "runtime/vm_version.hpp"
35 #include "utilities/checkedCast.hpp"
36
37 class ciInlineKlass;
38
39 // MacroAssembler extends Assembler by frequently used macros.
40 //
41 // Instructions for which a 'better' code sequence exists depending
42 // on arguments should also go in here.
43
44 class MacroAssembler: public Assembler {
45 friend class LIR_Assembler;
46 friend class Runtime1; // as_Address()
47
48 public:
49 // Support for VM calls
50 //
51 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
52 // may customize this version by overriding it for its purposes (e.g., to save/restore
53 // additional registers when doing a VM call).
54
55 virtual void call_VM_leaf_base(
56 address entry_point, // the entry point
57 int number_of_arguments // the number of arguments to pop after the call
58 );
59
60 protected:
61 // This is the base routine called by the different versions of call_VM. The interpreter
62 // may customize this version by overriding it for its purposes (e.g., to save/restore
63 // additional registers when doing a VM call).
64 //
65 // call_VM_base returns the register which contains the thread upon return.
66 // If no last_java_sp is specified (noreg) than rsp will be used instead.
67 virtual void call_VM_base( // returns the register containing the thread upon return
68 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
69 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
70 address entry_point, // the entry point
71 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
72 bool check_exceptions // whether to check for pending exceptions after return
73 );
74
75 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
76
77 public:
78 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
79
80 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
81 // The implementation is only non-empty for the InterpreterMacroAssembler,
82 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
83 virtual void check_and_handle_popframe();
84 virtual void check_and_handle_earlyret();
85
86 Address as_Address(AddressLiteral adr);
87 Address as_Address(ArrayAddress adr, Register rscratch);
88
89 // Support for null-checks
90 //
91 // Generates code that causes a null OS exception if the content of reg is null.
92 // If the accessed location is M[reg + offset] and the offset is known, provide the
93 // offset. No explicit code generation is needed if the offset is within a certain
94 // range (0 <= offset <= page_size).
95
96 void null_check(Register reg, int offset = -1);
97 static bool needs_explicit_null_check(intptr_t offset);
98 static bool uses_implicit_null_check(void* address);
99
100 // markWord tests, kills markWord reg
101 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
102
103 // inlineKlass queries, kills temp_reg
104 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
105
106 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
107 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
108 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
109 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
110
111 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
112 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
113 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
114 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
115 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
116 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
117
118 // Check array klass layout helper for flat or null-free arrays...
119 void test_flat_array_layout(Register lh, Label& is_flat_array);
120 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
121
122 // Required platform-specific helpers for Label::patch_instructions.
123 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
124 void pd_patch_instruction(address branch, address target, const char* file, int line) {
125 unsigned char op = branch[0];
126 assert(op == 0xE8 /* call */ ||
127 op == 0xE9 /* jmp */ ||
128 op == 0xEB /* short jmp */ ||
129 (op & 0xF0) == 0x70 /* short jcc */ ||
130 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
131 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
132 (op == 0x8D) /* lea */,
133 "Invalid opcode at patch point");
134
135 if (op == 0xEB || (op & 0xF0) == 0x70) {
136 // short offset operators (jmp and jcc)
137 char* disp = (char*) &branch[1];
138 int imm8 = checked_cast<int>(target - (address) &disp[1]);
139 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
140 file == nullptr ? "<null>" : file, line);
141 *disp = (char)imm8;
142 } else {
143 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
144 int imm32 = checked_cast<int>(target - (address) &disp[1]);
145 *disp = imm32;
146 }
147 }
148
149 // The following 4 methods return the offset of the appropriate move instruction
150
151 // Support for fast byte/short loading with zero extension (depending on particular CPU)
152 int load_unsigned_byte(Register dst, Address src);
153 int load_unsigned_short(Register dst, Address src);
154
155 // Support for fast byte/short loading with sign extension (depending on particular CPU)
156 int load_signed_byte(Register dst, Address src);
157 int load_signed_short(Register dst, Address src);
158
159 // Support for sign-extension (hi:lo = extend_sign(lo))
160 void extend_sign(Register hi, Register lo);
161
162 // Load and store values by size and signed-ness
163 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
164 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
165
166 // Support for inc/dec with optimal instruction selection depending on value
167
168 void increment(Register reg, int value = 1) { incrementq(reg, value); }
169 void decrement(Register reg, int value = 1) { decrementq(reg, value); }
170 void increment(Address dst, int value = 1) { incrementq(dst, value); }
171 void decrement(Address dst, int value = 1) { decrementq(dst, value); }
172
173 void decrementl(Address dst, int value = 1);
174 void decrementl(Register reg, int value = 1);
175
176 void decrementq(Register reg, int value = 1);
177 void decrementq(Address dst, int value = 1);
178
179 void incrementl(Address dst, int value = 1);
180 void incrementl(Register reg, int value = 1);
181
182 void incrementq(Register reg, int value = 1);
183 void incrementq(Address dst, int value = 1);
184
185 void incrementl(AddressLiteral dst, Register rscratch = noreg);
186 void incrementl(ArrayAddress dst, Register rscratch);
187
188 void incrementq(AddressLiteral dst, Register rscratch = noreg);
189
190 void movhlf(XMMRegister dst, XMMRegister src, Register rscratch = noreg);
191
192 // Support optimal SSE move instructions.
193 void movflt(XMMRegister dst, XMMRegister src) {
194 if (dst-> encoding() == src->encoding()) return;
195 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
196 else { movss (dst, src); return; }
197 }
198 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
199 void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
200 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
201
202 // Move with zero extension
203 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
204
205 void movdbl(XMMRegister dst, XMMRegister src) {
206 if (dst-> encoding() == src->encoding()) return;
207 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
208 else { movsd (dst, src); return; }
209 }
210
211 void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
212
213 void movdbl(XMMRegister dst, Address src) {
214 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
215 else { movlpd(dst, src); return; }
216 }
217 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
218
219 void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
220 // Use separate tmp XMM register because caller may
221 // requires src XMM register to be unchanged (as in x86.ad).
222 vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
223 movdl(dst, tmp);
224 movswl(dst, dst);
225 }
226
227 void flt16_to_flt(XMMRegister dst, Register src) {
228 movdl(dst, src);
229 vcvtph2ps(dst, dst, Assembler::AVX_128bit);
230 }
231
232 // Alignment
233 void align32();
234 void align64();
235 void align(uint modulus);
236 void align(uint modulus, uint target);
237
238 void post_call_nop();
239
240 // Stack frame creation/removal
241 void enter();
242 void leave();
243
244 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information).
245 // The pointer will be loaded into the thread register. This is a slow version that does native call.
246 // Normally, JavaThread pointer is available in r15_thread, use that where possible.
247 void get_thread_slow(Register thread);
248
249 // Support for argument shuffling
250
251 // bias in bytes
252 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
253 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
254 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
255 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
256 void move_ptr(VMRegPair src, VMRegPair dst);
257 void object_move(OopMap* map,
258 int oop_handle_offset,
259 int framesize_in_slots,
260 VMRegPair src,
261 VMRegPair dst,
262 bool is_receiver,
263 int* receiver_offset);
264
265 // Support for VM calls
266 //
267 // It is imperative that all calls into the VM are handled via the call_VM macros.
268 // They make sure that the stack linkage is setup correctly. call_VM's correspond
269 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
270
271
272 void call_VM(Register oop_result,
273 address entry_point,
274 bool check_exceptions = true);
275 void call_VM(Register oop_result,
276 address entry_point,
277 Register arg_1,
278 bool check_exceptions = true);
279 void call_VM(Register oop_result,
280 address entry_point,
281 Register arg_1, Register arg_2,
282 bool check_exceptions = true);
283 void call_VM(Register oop_result,
284 address entry_point,
285 Register arg_1, Register arg_2, Register arg_3,
286 bool check_exceptions = true);
287
288 // Overloadings with last_Java_sp
289 void call_VM(Register oop_result,
290 Register last_java_sp,
291 address entry_point,
292 int number_of_arguments = 0,
293 bool check_exceptions = true);
294 void call_VM(Register oop_result,
295 Register last_java_sp,
296 address entry_point,
297 Register arg_1, bool
298 check_exceptions = true);
299 void call_VM(Register oop_result,
300 Register last_java_sp,
301 address entry_point,
302 Register arg_1, Register arg_2,
303 bool check_exceptions = true);
304 void call_VM(Register oop_result,
305 Register last_java_sp,
306 address entry_point,
307 Register arg_1, Register arg_2, Register arg_3,
308 bool check_exceptions = true);
309
310 void get_vm_result_oop(Register oop_result);
311 void get_vm_result_metadata(Register metadata_result);
312
313 // These always tightly bind to MacroAssembler::call_VM_base
314 // bypassing the virtual implementation
315 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
316 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
317 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
318 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
319 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
320
321 void call_VM_leaf0(address entry_point);
322 void call_VM_leaf(address entry_point,
323 int number_of_arguments = 0);
324 void call_VM_leaf(address entry_point,
325 Register arg_1);
326 void call_VM_leaf(address entry_point,
327 Register arg_1, Register arg_2);
328 void call_VM_leaf(address entry_point,
329 Register arg_1, Register arg_2, Register arg_3);
330
331 void call_VM_leaf(address entry_point,
332 Register arg_1, Register arg_2, Register arg_3, Register arg_4);
333
334 // These always tightly bind to MacroAssembler::call_VM_leaf_base
335 // bypassing the virtual implementation
336 void super_call_VM_leaf(address entry_point);
337 void super_call_VM_leaf(address entry_point, Register arg_1);
338 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
339 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
340 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
341
342 void set_last_Java_frame(Register last_java_sp,
343 Register last_java_fp,
344 address last_java_pc,
345 Register rscratch);
346
347 void set_last_Java_frame(Register last_java_sp,
348 Register last_java_fp,
349 Label &last_java_pc,
350 Register scratch);
351
352 void reset_last_Java_frame(bool clear_fp);
353
354 // jobjects
355 void clear_jobject_tag(Register possibly_non_local);
356 void resolve_jobject(Register value, Register tmp);
357 void resolve_global_jobject(Register value, Register tmp);
358
359 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
360 void c2bool(Register x);
361
362 // C++ bool manipulation
363
364 void movbool(Register dst, Address src);
365 void movbool(Address dst, bool boolconst);
366 void movbool(Address dst, Register src);
367 void testbool(Register dst);
368
369 void resolve_oop_handle(Register result, Register tmp);
370 void resolve_weak_handle(Register result, Register tmp);
371 void load_mirror(Register mirror, Register method, Register tmp);
372 void load_method_holder_cld(Register rresult, Register rmethod);
373
374 void load_method_holder(Register holder, Register method);
375
376 // oop manipulations
377
378 // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
379 void load_metadata(Register dst, Register src);
380 void load_narrow_klass_compact(Register dst, Register src);
381 void load_klass(Register dst, Register src, Register tmp);
382 void store_klass(Register dst, Register src, Register tmp);
383
384 // Compares the narrow Klass pointer of an object to a given narrow Klass.
385 void cmp_klass(Register klass, Register obj, Register tmp);
386
387 // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
388 // Uses tmp1 and tmp2 as temporary registers.
389 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
390
391 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
392 Register tmp1);
393 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
394 Register tmp1, Register tmp2, Register tmp3);
395
396 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
397
398 // inline type data payload offsets...
399 void payload_offset(Register inline_klass, Register offset);
400 void payload_addr(Register oop, Register data, Register inline_klass);
401 // get data payload ptr a flat value array at index, kills rcx and index
402 void data_for_value_array_index(Register array, Register array_klass,
403 Register index, Register data);
404
405 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
406 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
407 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
408 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
409
410 // Used for storing null. All other oop constants should be
411 // stored using routines that take a jobject.
412 void store_heap_oop_null(Address dst);
413
414 void load_prototype_header(Register dst, Register src, Register tmp);
415
416 void store_klass_gap(Register dst, Register src);
417
418 // This dummy is to prevent a call to store_heap_oop from
419 // converting a zero (like null) into a Register by giving
420 // the compiler two choices it can't resolve
421
422 void store_heap_oop(Address dst, void* dummy);
423
424 void encode_heap_oop(Register r);
425 void decode_heap_oop(Register r);
426 void encode_heap_oop_not_null(Register r);
427 void decode_heap_oop_not_null(Register r);
428 void encode_heap_oop_not_null(Register dst, Register src);
429 void decode_heap_oop_not_null(Register dst, Register src);
430
431 void set_narrow_oop(Register dst, jobject obj);
432 void set_narrow_oop(Address dst, jobject obj);
433 void cmp_narrow_oop(Register dst, jobject obj);
434 void cmp_narrow_oop(Address dst, jobject obj);
435
436 void encode_klass_not_null(Register r, Register tmp);
437 void decode_klass_not_null(Register r, Register tmp);
438 void encode_and_move_klass_not_null(Register dst, Register src);
439 void decode_and_move_klass_not_null(Register dst, Register src);
440 void set_narrow_klass(Register dst, Klass* k);
441 void set_narrow_klass(Address dst, Klass* k);
442 void cmp_narrow_klass(Register dst, Klass* k);
443 void cmp_narrow_klass(Address dst, Klass* k);
444
445 // if heap base register is used - reinit it with the correct value
446 void reinit_heapbase();
447
448 DEBUG_ONLY(void verify_heapbase(const char* msg);)
449
450 // Int division/remainder for Java
451 // (as idivl, but checks for special case as described in JVM spec.)
452 // returns idivl instruction offset for implicit exception handling
453 int corrected_idivl(Register reg);
454
455 // Long division/remainder for Java
456 // (as idivq, but checks for special case as described in JVM spec.)
457 // returns idivq instruction offset for implicit exception handling
458 int corrected_idivq(Register reg);
459
460 void int3();
461
462 // Long operation macros for a 32bit cpu
463 // Long negation for Java
464 void lneg(Register hi, Register lo);
465
466 // Long multiplication for Java
467 // (destroys contents of eax, ebx, ecx and edx)
468 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
469
470 // Long shifts for Java
471 // (semantics as described in JVM spec.)
472 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
473 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
474
475 // Long compare for Java
476 // (semantics as described in JVM spec.)
477 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
478
479
480 // misc
481
482 // Sign extension
483 void sign_extend_short(Register reg);
484 void sign_extend_byte(Register reg);
485
486 // Division by power of 2, rounding towards 0
487 void division_with_shift(Register reg, int shift_value);
488
489 // dst = c = a * b + c
490 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
491 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
492
493 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
494 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
495 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
496 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
497
498
499 // same as fcmp2int, but using SSE2
500 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
501 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
502
503 void push_IU_state();
504 void pop_IU_state();
505
506 void push_FPU_state();
507 void pop_FPU_state();
508
509 void push_CPU_state();
510 void pop_CPU_state();
511
512 void push_cont_fastpath();
513 void pop_cont_fastpath();
514
515 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
516
517 // Round up to a power of two
518 void round_to(Register reg, int modulus);
519
520 private:
521 // General purpose and XMM registers potentially clobbered by native code; there
522 // is no need for FPU or AVX opmask related methods because C1/interpreter
523 // - we save/restore FPU state as a whole always
524 // - do not care about AVX-512 opmask
525 static RegSet call_clobbered_gp_registers();
526 static XMMRegSet call_clobbered_xmm_registers();
527
528 void push_set(XMMRegSet set, int offset);
529 void pop_set(XMMRegSet set, int offset);
530
531 public:
532 void push_set(RegSet set, int offset = -1);
533 void pop_set(RegSet set, int offset = -1);
534
535 // Push and pop everything that might be clobbered by a native
536 // runtime call.
537 // Only save the lower 64 bits of each vector register.
538 // Additional registers can be excluded in a passed RegSet.
539 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
540 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
541
542 void push_call_clobbered_registers(bool save_fpu = true) {
543 push_call_clobbered_registers_except(RegSet(), save_fpu);
544 }
545 void pop_call_clobbered_registers(bool restore_fpu = true) {
546 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
547 }
548
549 // allocation
550
551 // Object / value buffer allocation...
552 // Allocate instance of klass, assumes klass initialized by caller
553 // new_obj prefers to be rax
554 // Kills t1 and t2, perserves klass, return allocation in new_obj (rsi on LP64)
555 void allocate_instance(Register klass, Register new_obj,
556 Register t1, Register t2,
557 bool clear_fields, Label& alloc_failed);
558
559 void tlab_allocate(
560 Register obj, // result: pointer to object after successful allocation
561 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
562 int con_size_in_bytes, // object size in bytes if known at compile time
563 Register t1, // temp register
564 Register t2, // temp register
565 Label& slow_case // continuation point if fast allocation fails
566 );
567 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
568
569 void inline_layout_info(Register klass, Register index, Register layout_info);
570
571 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
572
573 // interface method calling
574 void lookup_interface_method(Register recv_klass,
575 Register intf_klass,
576 RegisterOrConstant itable_index,
577 Register method_result,
578 Register scan_temp,
579 Label& no_such_interface,
580 bool return_method = true);
581
582 void lookup_interface_method_stub(Register recv_klass,
583 Register holder_klass,
584 Register resolved_klass,
585 Register method_result,
586 Register scan_temp,
587 Register temp_reg2,
588 Register receiver,
589 int itable_index,
590 Label& L_no_such_interface);
591
592 // virtual method calling
593 void lookup_virtual_method(Register recv_klass,
594 RegisterOrConstant vtable_index,
595 Register method_result);
596
597 // Test sub_klass against super_klass, with fast and slow paths.
598
599 // The fast path produces a tri-state answer: yes / no / maybe-slow.
600 // One of the three labels can be null, meaning take the fall-through.
601 // If super_check_offset is -1, the value is loaded up from super_klass.
602 // No registers are killed, except temp_reg.
603 void check_klass_subtype_fast_path(Register sub_klass,
604 Register super_klass,
605 Register temp_reg,
606 Label* L_success,
607 Label* L_failure,
608 Label* L_slow_path,
609 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
610
611 // The rest of the type check; must be wired to a corresponding fast path.
612 // It does not repeat the fast path logic, so don't use it standalone.
613 // The temp_reg and temp2_reg can be noreg, if no temps are available.
614 // Updates the sub's secondary super cache as necessary.
615 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
616 void check_klass_subtype_slow_path(Register sub_klass,
617 Register super_klass,
618 Register temp_reg,
619 Register temp2_reg,
620 Label* L_success,
621 Label* L_failure,
622 bool set_cond_codes = false);
623
624 // The 64-bit version, which may do a hashed subclass lookup.
625 void check_klass_subtype_slow_path(Register sub_klass,
626 Register super_klass,
627 Register temp_reg,
628 Register temp2_reg,
629 Register temp3_reg,
630 Register temp4_reg,
631 Label* L_success,
632 Label* L_failure);
633
634 // Three parts of a hashed subclass lookup: a simple linear search,
635 // a table lookup, and a fallback that does linear probing in the
636 // event of a hash collision.
637 void check_klass_subtype_slow_path_linear(Register sub_klass,
638 Register super_klass,
639 Register temp_reg,
640 Register temp2_reg,
641 Label* L_success,
642 Label* L_failure,
643 bool set_cond_codes = false);
644 void check_klass_subtype_slow_path_table(Register sub_klass,
645 Register super_klass,
646 Register temp_reg,
647 Register temp2_reg,
648 Register temp3_reg,
649 Register result_reg,
650 Label* L_success,
651 Label* L_failure);
652 void hashed_check_klass_subtype_slow_path(Register sub_klass,
653 Register super_klass,
654 Register temp_reg,
655 Label* L_success,
656 Label* L_failure);
657
658 // As above, but with a constant super_klass.
659 // The result is in Register result, not the condition codes.
660 void lookup_secondary_supers_table_const(Register sub_klass,
661 Register super_klass,
662 Register temp1,
663 Register temp2,
664 Register temp3,
665 Register temp4,
666 Register result,
667 u1 super_klass_slot);
668
669 using Assembler::salq;
670 void salq(Register dest, Register count);
671 using Assembler::rorq;
672 void rorq(Register dest, Register count);
673 void lookup_secondary_supers_table_var(Register sub_klass,
674 Register super_klass,
675 Register temp1,
676 Register temp2,
677 Register temp3,
678 Register temp4,
679 Register result);
680
681 void lookup_secondary_supers_table_slow_path(Register r_super_klass,
682 Register r_array_base,
683 Register r_array_index,
684 Register r_bitmap,
685 Register temp1,
686 Register temp2,
687 Label* L_success,
688 Label* L_failure = nullptr);
689
690 void verify_secondary_supers_table(Register r_sub_klass,
691 Register r_super_klass,
692 Register expected,
693 Register temp1,
694 Register temp2,
695 Register temp3);
696
697 void repne_scanq(Register addr, Register value, Register count, Register limit,
698 Label* L_success,
699 Label* L_failure = nullptr);
700
701 // If r is valid, return r.
702 // If r is invalid, remove a register r2 from available_regs, add r2
703 // to regs_to_push, then return r2.
704 Register allocate_if_noreg(const Register r,
705 RegSetIterator<Register> &available_regs,
706 RegSet ®s_to_push);
707
708 // Simplified, combined version, good for typical uses.
709 // Falls through on failure.
710 void check_klass_subtype(Register sub_klass,
711 Register super_klass,
712 Register temp_reg,
713 Label& L_success);
714
715 void clinit_barrier(Register klass,
716 Label* L_fast_path = nullptr,
717 Label* L_slow_path = nullptr);
718
719 // method handles (JSR 292)
720 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
721
722 void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
723
724 // Debugging
725
726 // only if +VerifyOops
727 void _verify_oop(Register reg, const char* s, const char* file, int line);
728 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
729
730 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
731 if (VerifyOops) {
732 _verify_oop(reg, s, file, line);
733 }
734 }
735 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
736 if (VerifyOops) {
737 _verify_oop_addr(reg, s, file, line);
738 }
739 }
740
741 // TODO: verify method and klass metadata (compare against vptr?)
742 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
743 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
744
745 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
746 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
747 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
748 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
749 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
750
751 // Verify or restore cpu control state after JNI call
752 void restore_cpu_control_state_after_jni(Register rscratch);
753
754 // prints msg, dumps registers and stops execution
755 void stop(const char* msg);
756
757 // prints msg and continues
758 void warn(const char* msg);
759
760 // dumps registers and other state
761 void print_state();
762
763 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
764 static void debug64(char* msg, int64_t pc, int64_t regs[]);
765 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
766 static void print_state64(int64_t pc, int64_t regs[]);
767
768 void os_breakpoint();
769
770 void untested() { stop("untested"); }
771
772 void unimplemented(const char* what = "");
773
774 void should_not_reach_here() { stop("should not reach here"); }
775
776 void print_CPU_state();
777
778 // Stack overflow checking
779 void bang_stack_with_offset(int offset) {
780 // stack grows down, caller passes positive offset
781 assert(offset > 0, "must bang with negative offset");
782 movl(Address(rsp, (-offset)), rax);
783 }
784
785 // Writes to stack successive pages until offset reached to check for
786 // stack overflow + shadow pages. Also, clobbers tmp
787 void bang_stack_size(Register size, Register tmp);
788
789 // Check for reserved stack access in method being exited (for JIT)
790 void reserved_stack_check();
791
792 void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
793
794 void verify_tlab();
795
796 static Condition negate_condition(Condition cond);
797
798 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
799 // operands. In general the names are modified to avoid hiding the instruction in Assembler
800 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
801 // here in MacroAssembler. The major exception to this rule is call
802
803 // Arithmetics
804
805
806 void addptr(Address dst, int32_t src) { addq(dst, src); }
807 void addptr(Address dst, Register src);
808
809 void addptr(Register dst, Address src) { addq(dst, src); }
810 void addptr(Register dst, int32_t src);
811 void addptr(Register dst, Register src);
812 void addptr(Register dst, RegisterOrConstant src) {
813 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
814 else addptr(dst, src.as_register());
815 }
816
817 void andptr(Register dst, int32_t src);
818 void andptr(Register src1, Register src2) { andq(src1, src2); }
819 void andptr(Register dst, Address src) { andq(dst, src); }
820
821 using Assembler::andq;
822 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
823
824 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
825
826 // renamed to drag out the casting of address to int32_t/intptr_t
827 void cmp32(Register src1, int32_t imm);
828
829 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
830 // compare reg - mem, or reg - &mem
831 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
832
833 void cmp32(Register src1, Address src2);
834
835 void cmpoop(Register src1, Register src2);
836 void cmpoop(Register src1, Address src2);
837 void cmpoop(Register dst, jobject obj, Register rscratch);
838
839 // NOTE src2 must be the lval. This is NOT an mem-mem compare
840 void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
841
842 void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
843
844 void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
845 void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }
846
847 void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
848 void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }
849
850 // cmp64 to avoild hiding cmpq
851 void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
852
853 void cmpxchgptr(Register reg, Address adr);
854
855 void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
856
857 void imulptr(Register dst, Register src) { imulq(dst, src); }
858 void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }
859
860
861 void negptr(Register dst) { negq(dst); }
862
863 void notptr(Register dst) { notq(dst); }
864
865 void shlptr(Register dst, int32_t shift);
866 void shlptr(Register dst) { shlq(dst); }
867
868 void shrptr(Register dst, int32_t shift);
869 void shrptr(Register dst) { shrq(dst); }
870
871 void sarptr(Register dst) { sarq(dst); }
872 void sarptr(Register dst, int32_t src) { sarq(dst, src); }
873
874 void subptr(Address dst, int32_t src) { subq(dst, src); }
875
876 void subptr(Register dst, Address src) { subq(dst, src); }
877 void subptr(Register dst, int32_t src);
878 // Force generation of a 4 byte immediate value even if it fits into 8bit
879 void subptr_imm32(Register dst, int32_t src);
880 void subptr(Register dst, Register src);
881 void subptr(Register dst, RegisterOrConstant src) {
882 if (src.is_constant()) subptr(dst, (int) src.as_constant());
883 else subptr(dst, src.as_register());
884 }
885
886 void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
887 void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }
888
889 void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
890 void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }
891
892 void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
893
894
895
896 // Helper functions for statistics gathering.
897 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
898 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
899 // Unconditional atomic increment.
900 void atomic_incl(Address counter_addr);
901 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
902 void atomic_incq(Address counter_addr);
903 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
904 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
905 void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
906
907 using Assembler::lea;
908 void lea(Register dst, AddressLiteral adr);
909 void lea(Address dst, AddressLiteral adr, Register rscratch);
910
911 void leal32(Register dst, Address src) { leal(dst, src); }
912
913 // Import other testl() methods from the parent class or else
914 // they will be hidden by the following overriding declaration.
915 using Assembler::testl;
916 void testl(Address dst, int32_t imm32);
917 void testl(Register dst, int32_t imm32);
918 void testl(Register dst, AddressLiteral src); // requires reachable address
919 using Assembler::testq;
920 void testq(Address dst, int32_t imm32);
921 void testq(Register dst, int32_t imm32);
922
923 void orptr(Register dst, Address src) { orq(dst, src); }
924 void orptr(Register dst, Register src) { orq(dst, src); }
925 void orptr(Register dst, int32_t src) { orq(dst, src); }
926 void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }
927
928 void testptr(Register src, int32_t imm32) { testq(src, imm32); }
929 void testptr(Register src1, Address src2) { testq(src1, src2); }
930 void testptr(Address src, int32_t imm32) { testq(src, imm32); }
931 void testptr(Register src1, Register src2);
932
933 void xorptr(Register dst, Register src) { xorq(dst, src); }
934 void xorptr(Register dst, Address src) { xorq(dst, src); }
935
936 // Calls
937
938 void call(Label& L, relocInfo::relocType rtype);
939 void call(Register entry);
940 void call(Address addr) { Assembler::call(addr); }
941
942 // NOTE: this call transfers to the effective address of entry NOT
943 // the address contained by entry. This is because this is more natural
944 // for jumps/calls.
945 void call(AddressLiteral entry, Register rscratch = rax);
946
947 // Emit the CompiledIC call idiom
948 void ic_call(address entry, jint method_index = 0);
949 static int ic_check_size();
950 int ic_check(int end_alignment);
951
952 void emit_static_call_stub();
953
954 // Jumps
955
956 // NOTE: these jumps transfer to the effective address of dst NOT
957 // the address contained by dst. This is because this is more natural
958 // for jumps/calls.
959 void jump(AddressLiteral dst, Register rscratch = noreg);
960
961 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
962
963 // 32bit can do a case table jump in one instruction but we no longer allow the base
964 // to be installed in the Address class. This jump will transfer to the address
965 // contained in the location described by entry (not the address of entry)
966 void jump(ArrayAddress entry, Register rscratch);
967
968 // Adding more natural conditional jump instructions
969 void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
970 void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
971 void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
972 void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
973 void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
974 void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
975 void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
976 void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
977 void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
978 void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
979 void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
980 void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
981 void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
982 void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
983 void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
984 void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
985 void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
986 void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
987 void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
988 void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
989 void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
990 void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
991 void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
992 void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
993 void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
994 void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
995 void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
996 void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
997 void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
998 void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
999 // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1000 // * No condition for this * void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1001
1002 // Short versions of the above
1003 void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
1004 void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
1005 void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
1006 void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
1007 void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
1008 void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
1009 void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
1010 void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
1011 void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
1012 void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
1013 void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
1014 void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1015 void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1016 void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
1017 void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
1018 void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
1019 void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
1020 void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
1021 void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
1022 void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
1023 void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1024 void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1025 void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
1026 void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
1027 void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
1028 void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
1029 void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
1030 void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
1031 void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
1032 void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
1033 // * No condition for this * void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
1034 // * No condition for this * void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
1035
1036 // Floating
1037
1038 void push_f(XMMRegister r);
1039 void pop_f(XMMRegister r);
1040 void push_d(XMMRegister r);
1041 void pop_d(XMMRegister r);
1042
1043 void push_ppx(Register src);
1044 void pop_ppx(Register dst);
1045
1046 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
1047 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
1048 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1049
1050 void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
1051
1052 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
1053 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
1054 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1055
1056 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
1057 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
1058 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1059
1060 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
1061 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
1062 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1063
1064 void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1065
1066 void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1067 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1068 void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1069
1070 private:
1071 void sha256_AVX2_one_round_compute(
1072 Register reg_old_h,
1073 Register reg_a,
1074 Register reg_b,
1075 Register reg_c,
1076 Register reg_d,
1077 Register reg_e,
1078 Register reg_f,
1079 Register reg_g,
1080 Register reg_h,
1081 int iter);
1082 void sha256_AVX2_four_rounds_compute_first(int start);
1083 void sha256_AVX2_four_rounds_compute_last(int start);
1084 void sha256_AVX2_one_round_and_sched(
1085 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1086 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
1087 XMMRegister xmm_2, /* ymm6 */
1088 XMMRegister xmm_3, /* ymm7 */
1089 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
1090 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
1091 Register reg_c, /* edi */
1092 Register reg_d, /* esi */
1093 Register reg_e, /* r8d */
1094 Register reg_f, /* r9d */
1095 Register reg_g, /* r10d */
1096 Register reg_h, /* r11d */
1097 int iter);
1098
1099 void addm(int disp, Register r1, Register r2);
1100
1101 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1102 Register e, Register f, Register g, Register h, int iteration);
1103
1104 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1105 Register a, Register b, Register c, Register d, Register e, Register f,
1106 Register g, Register h, int iteration);
1107
1108 void addmq(int disp, Register r1, Register r2);
1109 public:
1110 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1111 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1112 Register buf, Register state, Register ofs, Register limit, Register rsp,
1113 bool multi_block, XMMRegister shuf_mask);
1114 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1115 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1116 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1117 XMMRegister shuf_mask);
1118 void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1119
1120 void fast_md5(Register buf, Address state, Address ofs, Address limit,
1121 bool multi_block);
1122
1123 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1124 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1125 Register buf, Register state, Register ofs, Register limit, Register rsp,
1126 bool multi_block);
1127
1128 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1129 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1130 Register buf, Register state, Register ofs, Register limit, Register rsp,
1131 bool multi_block, XMMRegister shuf_mask);
1132
1133 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1134 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1135 Register rax, Register rcx, Register rdx, Register tmp);
1136
1137 private:
1138
1139 // these are private because users should be doing movflt/movdbl
1140
1141 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1142 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1143 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1144 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1145
1146 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1147 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1148
1149 public:
1150
1151 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1152 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1153 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1154
1155 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1156 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1157 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1158
1159 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1160 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1161 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1162
1163 using Assembler::vbroadcasti128;
1164 void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1165
1166 using Assembler::vbroadcastsd;
1167 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1168
1169 using Assembler::vbroadcastss;
1170 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1171
1172 // Vector float blend
1173 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1174 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1175
1176 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1177 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1178 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1179
1180 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1181 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1182 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1183
1184 // Move Unaligned Double Quadword
1185 void movdqu(Address dst, XMMRegister src);
1186 void movdqu(XMMRegister dst, XMMRegister src);
1187 void movdqu(XMMRegister dst, Address src);
1188 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1189
1190 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1191 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1192 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1193 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1194 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1195 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1196
1197 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1198 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1199 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1200 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1201 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1202 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1203
1204 // Safe move operation, lowers down to 16bit moves for targets supporting
1205 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
1206 void kmov(Address dst, KRegister src);
1207 void kmov(KRegister dst, Address src);
1208 void kmov(KRegister dst, KRegister src);
1209 void kmov(Register dst, KRegister src);
1210 void kmov(KRegister dst, Register src);
1211
1212 using Assembler::movddup;
1213 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1214
1215 using Assembler::vmovddup;
1216 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1217
1218 // AVX Unaligned forms
1219 void vmovdqu(Address dst, XMMRegister src);
1220 void vmovdqu(XMMRegister dst, Address src);
1221 void vmovdqu(XMMRegister dst, XMMRegister src);
1222 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1223 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1224 void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
1225 void vmovdqu(XMMRegister dst, Address src, int vector_len);
1226 void vmovdqu(Address dst, XMMRegister src, int vector_len);
1227
1228 // AVX Aligned forms
1229 using Assembler::vmovdqa;
1230 void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1231 void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1232
1233 // AVX512 Unaligned
1234 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1235 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1236 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1237
1238 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1239 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1240
1241 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1242 if (dst->encoding() != src->encoding() || mask != k0) {
1243 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1244 }
1245 }
1246 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1247 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1248 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1249
1250 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1251 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1252 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1253
1254 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1255 if (dst->encoding() != src->encoding() || mask != k0) {
1256 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1257 }
1258 }
1259 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1260 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1261 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1262
1263 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1264 if (dst->encoding() != src->encoding()) {
1265 Assembler::evmovdqul(dst, src, vector_len);
1266 }
1267 }
1268 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1269 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1270
1271 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1272 if (dst->encoding() != src->encoding() || mask != k0) {
1273 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1274 }
1275 }
1276 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1277 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1278 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1279
1280 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1281 if (dst->encoding() != src->encoding()) {
1282 Assembler::evmovdquq(dst, src, vector_len);
1283 }
1284 }
1285 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1286 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1287 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1288 void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1289
1290 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1291 if (dst->encoding() != src->encoding() || mask != k0) {
1292 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1293 }
1294 }
1295 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1296 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1297 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1298 void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1299
1300 using Assembler::movapd;
1301 void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1302
1303 // Move Aligned Double Quadword
1304 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1305 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1306 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1307
1308 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1309 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1310 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1311 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1312
1313 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1314 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1315 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1316
1317 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1318 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1319 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1320
1321 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1322 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1323 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1324
1325 // Carry-Less Multiplication Quadword
1326 void pclmulldq(XMMRegister dst, XMMRegister src) {
1327 // 0x00 - multiply lower 64 bits [0:63]
1328 Assembler::pclmulqdq(dst, src, 0x00);
1329 }
1330 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1331 // 0x11 - multiply upper 64 bits [64:127]
1332 Assembler::pclmulqdq(dst, src, 0x11);
1333 }
1334
1335 void pcmpeqb(XMMRegister dst, XMMRegister src);
1336 void pcmpeqw(XMMRegister dst, XMMRegister src);
1337
1338 void pcmpestri(XMMRegister dst, Address src, int imm8);
1339 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1340
1341 void pmovzxbw(XMMRegister dst, XMMRegister src);
1342 void pmovzxbw(XMMRegister dst, Address src);
1343
1344 void pmovmskb(Register dst, XMMRegister src);
1345
1346 void ptest(XMMRegister dst, XMMRegister src);
1347
1348 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1349 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1350 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1351
1352 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1353 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1354 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1355
1356 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1357 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1358 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1359
1360 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1361 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1362 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1363
1364 void evucomish(XMMRegister dst, XMMRegister src) { Assembler::evucomish(dst, src); }
1365 void evucomish(XMMRegister dst, Address src) { Assembler::evucomish(dst, src); }
1366 void evucomish(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1367
1368 void evucomxsh(XMMRegister dst, XMMRegister src) { Assembler::evucomxsh(dst, src); }
1369 void evucomxsh(XMMRegister dst, Address src) { Assembler::evucomxsh(dst, src); }
1370 void evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1371
1372 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1373 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1374 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1375
1376 void evucomxss(XMMRegister dst, XMMRegister src) { Assembler::evucomxss(dst, src); }
1377 void evucomxss(XMMRegister dst, Address src) { Assembler::evucomxss(dst, src); }
1378 void evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1379
1380 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1381 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1382 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1383
1384 void evucomxsd(XMMRegister dst, XMMRegister src) { Assembler::evucomxsd(dst, src); }
1385 void evucomxsd(XMMRegister dst, Address src) { Assembler::evucomxsd(dst, src); }
1386 void evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1387
1388 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1389 void xorpd(XMMRegister dst, XMMRegister src);
1390 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1391 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1392
1393 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1394 void xorps(XMMRegister dst, XMMRegister src);
1395 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1396 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1397
1398 // Shuffle Bytes
1399 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1400 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1401 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1402 // AVX 3-operands instructions
1403
1404 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1405 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1406 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1407
1408 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1409 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1410 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1411
1412 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1413 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1414
1415 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1416 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1417 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1418
1419 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1420 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1421
1422 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1423 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1424 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1425
1426 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1427 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1428 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1429
1430 using Assembler::vpbroadcastd;
1431 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1432
1433 using Assembler::vpbroadcastq;
1434 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1435
1436 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1437 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1438
1439 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1440 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1441 using Assembler::evpcmpeqd;
1442 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1443
1444 // Vector compares
1445 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1446 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1447 }
1448 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1449
1450 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1451 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1452 }
1453 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1454
1455 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1456 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1457 }
1458 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1459
1460 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1461 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1462 }
1463 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1464
1465 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1466
1467 // Emit comparison instruction for the specified comparison predicate.
1468 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1469 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1470
1471 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1472 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1473
1474 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1475
1476 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1477 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1478
1479 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1480 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1481 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1482
1483 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1484
1485 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1486 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1487
1488 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1489 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1490
1491 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1492 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1493
1494 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1495 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1496
1497 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1498 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1499
1500 using Assembler::evpsllw;
1501 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1502 if (!is_varshift) {
1503 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1504 } else {
1505 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1506 }
1507 }
1508 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1509 if (!is_varshift) {
1510 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1511 } else {
1512 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1513 }
1514 }
1515 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1516 if (!is_varshift) {
1517 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1518 } else {
1519 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1520 }
1521 }
1522 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1523 if (!is_varshift) {
1524 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1525 } else {
1526 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1527 }
1528 }
1529 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1530 if (!is_varshift) {
1531 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1532 } else {
1533 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1534 }
1535 }
1536
1537 using Assembler::evpsrlq;
1538 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1539 if (!is_varshift) {
1540 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1541 } else {
1542 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1543 }
1544 }
1545 using Assembler::evpsraw;
1546 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1547 if (!is_varshift) {
1548 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1549 } else {
1550 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1551 }
1552 }
1553 using Assembler::evpsrad;
1554 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1555 if (!is_varshift) {
1556 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1557 } else {
1558 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1559 }
1560 }
1561 using Assembler::evpsraq;
1562 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1563 if (!is_varshift) {
1564 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1565 } else {
1566 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1567 }
1568 }
1569
1570 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1571 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1572 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1573 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1574
1575 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1576 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1577 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1578 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1579
1580 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1581 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1582
1583 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1584 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1585
1586 void vptest(XMMRegister dst, XMMRegister src);
1587 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1588
1589 void punpcklbw(XMMRegister dst, XMMRegister src);
1590 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1591
1592 void pshufd(XMMRegister dst, Address src, int mode);
1593 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1594
1595 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1596 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1597
1598 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1599 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1600 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1601
1602 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1603 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1604 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1605
1606 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1607
1608 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1609 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1610 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1611
1612 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1613 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1614 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1615
1616 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1617 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1618 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1619
1620 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1621 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1622 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1623
1624 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1625 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1626 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1627
1628 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1629 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1630 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1631
1632 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1633 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1634
1635 // AVX Vector instructions
1636
1637 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1638 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1639 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1640
1641 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1642 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1643 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1644
1645 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1646 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1647 Assembler::vpxor(dst, nds, src, vector_len);
1648 else
1649 Assembler::vxorpd(dst, nds, src, vector_len);
1650 }
1651 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1652 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1653 Assembler::vpxor(dst, nds, src, vector_len);
1654 else
1655 Assembler::vxorpd(dst, nds, src, vector_len);
1656 }
1657 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1658
1659 // Simple version for AVX2 256bit vectors
1660 void vpxor(XMMRegister dst, XMMRegister src) {
1661 assert(UseAVX >= 2, "Should be at least AVX2");
1662 Assembler::vpxor(dst, dst, src, AVX_256bit);
1663 }
1664 void vpxor(XMMRegister dst, Address src) {
1665 assert(UseAVX >= 2, "Should be at least AVX2");
1666 Assembler::vpxor(dst, dst, src, AVX_256bit);
1667 }
1668
1669 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1670 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1671
1672 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1673 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1674 Assembler::vinserti32x4(dst, nds, src, imm8);
1675 } else if (UseAVX > 1) {
1676 // vinserti128 is available only in AVX2
1677 Assembler::vinserti128(dst, nds, src, imm8);
1678 } else {
1679 Assembler::vinsertf128(dst, nds, src, imm8);
1680 }
1681 }
1682
1683 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1684 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1685 Assembler::vinserti32x4(dst, nds, src, imm8);
1686 } else if (UseAVX > 1) {
1687 // vinserti128 is available only in AVX2
1688 Assembler::vinserti128(dst, nds, src, imm8);
1689 } else {
1690 Assembler::vinsertf128(dst, nds, src, imm8);
1691 }
1692 }
1693
1694 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1695 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1696 Assembler::vextracti32x4(dst, src, imm8);
1697 } else if (UseAVX > 1) {
1698 // vextracti128 is available only in AVX2
1699 Assembler::vextracti128(dst, src, imm8);
1700 } else {
1701 Assembler::vextractf128(dst, src, imm8);
1702 }
1703 }
1704
1705 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1706 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1707 Assembler::vextracti32x4(dst, src, imm8);
1708 } else if (UseAVX > 1) {
1709 // vextracti128 is available only in AVX2
1710 Assembler::vextracti128(dst, src, imm8);
1711 } else {
1712 Assembler::vextractf128(dst, src, imm8);
1713 }
1714 }
1715
1716 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1717 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1718 vinserti128(dst, dst, src, 1);
1719 }
1720 void vinserti128_high(XMMRegister dst, Address src) {
1721 vinserti128(dst, dst, src, 1);
1722 }
1723 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1724 vextracti128(dst, src, 1);
1725 }
1726 void vextracti128_high(Address dst, XMMRegister src) {
1727 vextracti128(dst, src, 1);
1728 }
1729
1730 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1731 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1732 Assembler::vinsertf32x4(dst, dst, src, 1);
1733 } else {
1734 Assembler::vinsertf128(dst, dst, src, 1);
1735 }
1736 }
1737
1738 void vinsertf128_high(XMMRegister dst, Address src) {
1739 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1740 Assembler::vinsertf32x4(dst, dst, src, 1);
1741 } else {
1742 Assembler::vinsertf128(dst, dst, src, 1);
1743 }
1744 }
1745
1746 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1747 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1748 Assembler::vextractf32x4(dst, src, 1);
1749 } else {
1750 Assembler::vextractf128(dst, src, 1);
1751 }
1752 }
1753
1754 void vextractf128_high(Address dst, XMMRegister src) {
1755 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1756 Assembler::vextractf32x4(dst, src, 1);
1757 } else {
1758 Assembler::vextractf128(dst, src, 1);
1759 }
1760 }
1761
1762 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1763 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1764 Assembler::vinserti64x4(dst, dst, src, 1);
1765 }
1766 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1767 Assembler::vinsertf64x4(dst, dst, src, 1);
1768 }
1769 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1770 Assembler::vextracti64x4(dst, src, 1);
1771 }
1772 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1773 Assembler::vextractf64x4(dst, src, 1);
1774 }
1775 void vextractf64x4_high(Address dst, XMMRegister src) {
1776 Assembler::vextractf64x4(dst, src, 1);
1777 }
1778 void vinsertf64x4_high(XMMRegister dst, Address src) {
1779 Assembler::vinsertf64x4(dst, dst, src, 1);
1780 }
1781
1782 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1783 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1784 vinserti128(dst, dst, src, 0);
1785 }
1786 void vinserti128_low(XMMRegister dst, Address src) {
1787 vinserti128(dst, dst, src, 0);
1788 }
1789 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1790 vextracti128(dst, src, 0);
1791 }
1792 void vextracti128_low(Address dst, XMMRegister src) {
1793 vextracti128(dst, src, 0);
1794 }
1795
1796 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1797 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1798 Assembler::vinsertf32x4(dst, dst, src, 0);
1799 } else {
1800 Assembler::vinsertf128(dst, dst, src, 0);
1801 }
1802 }
1803
1804 void vinsertf128_low(XMMRegister dst, Address src) {
1805 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1806 Assembler::vinsertf32x4(dst, dst, src, 0);
1807 } else {
1808 Assembler::vinsertf128(dst, dst, src, 0);
1809 }
1810 }
1811
1812 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1813 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1814 Assembler::vextractf32x4(dst, src, 0);
1815 } else {
1816 Assembler::vextractf128(dst, src, 0);
1817 }
1818 }
1819
1820 void vextractf128_low(Address dst, XMMRegister src) {
1821 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1822 Assembler::vextractf32x4(dst, src, 0);
1823 } else {
1824 Assembler::vextractf128(dst, src, 0);
1825 }
1826 }
1827
1828 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1829 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1830 Assembler::vinserti64x4(dst, dst, src, 0);
1831 }
1832 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1833 Assembler::vinsertf64x4(dst, dst, src, 0);
1834 }
1835 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1836 Assembler::vextracti64x4(dst, src, 0);
1837 }
1838 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1839 Assembler::vextractf64x4(dst, src, 0);
1840 }
1841 void vextractf64x4_low(Address dst, XMMRegister src) {
1842 Assembler::vextractf64x4(dst, src, 0);
1843 }
1844 void vinsertf64x4_low(XMMRegister dst, Address src) {
1845 Assembler::vinsertf64x4(dst, dst, src, 0);
1846 }
1847
1848 // Carry-Less Multiplication Quadword
1849 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1850 // 0x00 - multiply lower 64 bits [0:63]
1851 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1852 }
1853 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1854 // 0x11 - multiply upper 64 bits [64:127]
1855 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1856 }
1857 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1858 // 0x10 - multiply nds[0:63] and src[64:127]
1859 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1860 }
1861 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1862 //0x01 - multiply nds[64:127] and src[0:63]
1863 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1864 }
1865
1866 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1867 // 0x00 - multiply lower 64 bits [0:63]
1868 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1869 }
1870 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1871 // 0x11 - multiply upper 64 bits [64:127]
1872 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1873 }
1874
1875 // AVX-512 mask operations.
1876 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1877 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1878 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1879 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1880 void kortest(uint masklen, KRegister src1, KRegister src2);
1881 void ktest(uint masklen, KRegister src1, KRegister src2);
1882
1883 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1884 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1885
1886 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1887 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1888
1889 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1890 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1891
1892 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1893 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1894
1895 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1896 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1897 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1898 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1899
1900 using Assembler::evpandq;
1901 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1902
1903 using Assembler::evpaddq;
1904 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1905
1906 using Assembler::evporq;
1907 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1908
1909 using Assembler::vpshufb;
1910 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1911
1912 using Assembler::vpor;
1913 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1914
1915 using Assembler::vpternlogq;
1916 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1917
1918 void cmov32( Condition cc, Register dst, Address src);
1919 void cmov32( Condition cc, Register dst, Register src);
1920
1921 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1922
1923 void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); }
1924 void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1925
1926 void movoop(Register dst, jobject obj);
1927 void movoop(Address dst, jobject obj, Register rscratch);
1928
1929 void mov_metadata(Register dst, Metadata* obj);
1930 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1931
1932 void mov64(Register dst, int64_t imm64);
1933 void mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format);
1934
1935 void movptr(Register dst, Register src);
1936 void movptr(Register dst, Address src);
1937 void movptr(Register dst, AddressLiteral src);
1938 void movptr(Register dst, ArrayAddress src);
1939 void movptr(Register dst, intptr_t src);
1940 void movptr(Address dst, Register src);
1941 void movptr(Address dst, int32_t imm);
1942 void movptr(Address dst, intptr_t src, Register rscratch);
1943 void movptr(ArrayAddress dst, Register src, Register rscratch);
1944
1945 void movptr(Register dst, RegisterOrConstant src) {
1946 if (src.is_constant()) movptr(dst, src.as_constant());
1947 else movptr(dst, src.as_register());
1948 }
1949
1950
1951 // to avoid hiding movl
1952 void mov32(Register dst, AddressLiteral src);
1953 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1954
1955 // Import other mov() methods from the parent class or else
1956 // they will be hidden by the following overriding declaration.
1957 using Assembler::movdl;
1958 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1959
1960 using Assembler::movq;
1961 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1962
1963 // Can push value or effective address
1964 void pushptr(AddressLiteral src, Register rscratch);
1965
1966 void pushptr(Address src) { pushq(src); }
1967 void popptr(Address src) { popq(src); }
1968
1969 void pushoop(jobject obj, Register rscratch);
1970 void pushklass(Metadata* obj, Register rscratch);
1971
1972 // sign extend as need a l to ptr sized element
1973 void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1974 void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1975
1976
1977 public:
1978 // Inline type specific methods
1979 #include "asm/macroAssembler_common.hpp"
1980
1981 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1982 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1983 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1984 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1985 RegState reg_state[]);
1986 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1987 VMRegPair* from, int from_count, int& from_index, VMReg to,
1988 RegState reg_state[], Register val_array);
1989 int extend_stack_for_inline_args(int args_on_stack);
1990 void remove_frame(int initial_framesize, bool needs_stack_repair);
1991 VMReg spill_reg_for(VMReg reg);
1992
1993 // clear memory of size 'cnt' qwords, starting at 'base';
1994 // if 'is_large' is set, do not try to produce short loop
1995 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1996
1997 // clear memory initialization sequence for constant size;
1998 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1999
2000 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2001 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2002
2003 // Fill primitive arrays
2004 void generate_fill(BasicType t, bool aligned,
2005 Register to, Register value, Register count,
2006 Register rtmp, XMMRegister xtmp);
2007
2008 void encode_iso_array(Register src, Register dst, Register len,
2009 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2010 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2011
2012 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2013 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2014 Register y, Register y_idx, Register z,
2015 Register carry, Register product,
2016 Register idx, Register kdx);
2017 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
2018 Register yz_idx, Register idx,
2019 Register carry, Register product, int offset);
2020 void multiply_128_x_128_bmi2_loop(Register y, Register z,
2021 Register carry, Register carry2,
2022 Register idx, Register jdx,
2023 Register yz_idx1, Register yz_idx2,
2024 Register tmp, Register tmp3, Register tmp4);
2025 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2026 Register yz_idx, Register idx, Register jdx,
2027 Register carry, Register product,
2028 Register carry2);
2029 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2030 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2031 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2032 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2033 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2034 Register tmp2);
2035 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2036 Register rdxReg, Register raxReg);
2037 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2038 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2039 Register tmp3, Register tmp4);
2040 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2041 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2042
2043 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2044 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2045 Register raxReg);
2046 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2047 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2048 Register raxReg);
2049 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2050 Register result, Register tmp1, Register tmp2,
2051 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2052
2053 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2054 void update_byte_crc32(Register crc, Register val, Register table);
2055 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
2056
2057 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2058 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2059 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2060 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
2061
2062 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
2063 // Note on a naming convention:
2064 // Prefix w = register only used on a Westmere+ architecture
2065 // Prefix n = register only used on a Nehalem architecture
2066 void crc32c_ipl_alg4(Register in_out, uint32_t n,
2067 Register tmp1, Register tmp2, Register tmp3);
2068 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2069 Register in_out,
2070 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2071 XMMRegister w_xtmp2,
2072 Register tmp1,
2073 Register n_tmp2, Register n_tmp3);
2074 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2075 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2076 Register tmp1, Register tmp2,
2077 Register n_tmp3);
2078 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2079 Register in_out1, Register in_out2, Register in_out3,
2080 Register tmp1, Register tmp2, Register tmp3,
2081 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2082 Register tmp4, Register tmp5,
2083 Register n_tmp6);
2084 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2085 Register tmp1, Register tmp2, Register tmp3,
2086 Register tmp4, Register tmp5, Register tmp6,
2087 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2088 bool is_pclmulqdq_supported);
2089 // Fold 128-bit data chunk
2090 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2091 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
2092 // Fold 512-bit data chunk
2093 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2094 // Fold 8-bit data
2095 void fold_8bit_crc32(Register crc, Register table, Register tmp);
2096 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2097
2098 // Compress char[] array to byte[].
2099 void char_array_compress(Register src, Register dst, Register len,
2100 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2101 XMMRegister tmp4, Register tmp5, Register result,
2102 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2103
2104 // Inflate byte[] array to char[].
2105 void byte_array_inflate(Register src, Register dst, Register len,
2106 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2107
2108 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2109 Register length, Register temp, int vec_enc);
2110
2111 void fill64_masked(uint shift, Register dst, int disp,
2112 XMMRegister xmm, KRegister mask, Register length,
2113 Register temp, bool use64byteVector = false);
2114
2115 void fill32_masked(uint shift, Register dst, int disp,
2116 XMMRegister xmm, KRegister mask, Register length,
2117 Register temp);
2118
2119 void fill32(Address dst, XMMRegister xmm);
2120
2121 void fill32(Register dst, int disp, XMMRegister xmm);
2122
2123 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2124
2125 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);
2126
2127 void convert_f2i(Register dst, XMMRegister src);
2128 void convert_d2i(Register dst, XMMRegister src);
2129 void convert_f2l(Register dst, XMMRegister src);
2130 void convert_d2l(Register dst, XMMRegister src);
2131 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2132 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2133
2134 void cache_wb(Address line);
2135 void cache_wbsync(bool is_pre);
2136
2137 #ifdef COMPILER2_OR_JVMCI
2138 void generate_fill_avx3(BasicType type, Register to, Register value,
2139 Register count, Register rtmp, XMMRegister xtmp);
2140 #endif // COMPILER2_OR_JVMCI
2141
2142 void vallones(XMMRegister dst, int vector_len);
2143
2144 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2145
2146 void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2147 void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
2148
2149 void save_legacy_gprs();
2150 void restore_legacy_gprs();
2151 void load_aotrc_address(Register reg, address a);
2152 void setcc(Assembler::Condition comparison, Register dst);
2153 };
2154
2155 #endif // CPU_X86_MACROASSEMBLER_X86_HPP