1 /*
2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/vm_version.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
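
  // Illustrative sketch (not taken from any particular caller; the entry point
  // below is hypothetical): a leaf call passes its arguments explicitly and,
  // unlike call_VM, has no oop result and no pending-exception check.
  //
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SomeRuntime::leaf_entry),
  //                   c_rarg0, c_rarg1);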
56
57 protected:
58 // This is the base routine called by the different versions of call_VM. The interpreter
59 // may customize this version by overriding it for its purposes (e.g., to save/restore
60 // additional registers when doing a VM call).
61 //
62   // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
63   // returns the register which contains the thread upon return. If a thread register has been
64   // specified, the return value will correspond to that register. If no last_java_sp is specified
65   // (noreg) then rsp will be used instead.
66 virtual void call_VM_base( // returns the register containing the thread upon return
67 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
68 Register java_thread, // the thread if computed before ; use noreg otherwise
69 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
70 address entry_point, // the entry point
71 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
72 bool check_exceptions // whether to check for pending exceptions after return
73 );
74
75 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
76
77 // helpers for FPU flag access
78 // tmp is a temporary register, if none is available use noreg
79 void save_rax (Register tmp);
80 void restore_rax(Register tmp);
81
82 public:
83 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
84
85 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
86 // The implementation is only non-empty for the InterpreterMacroAssembler,
87 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
88 virtual void check_and_handle_popframe(Register java_thread);
89 virtual void check_and_handle_earlyret(Register java_thread);
90
91 Address as_Address(AddressLiteral adr);
92 Address as_Address(ArrayAddress adr, Register rscratch);
93
94 // Support for null-checks
95 //
96   // Generates code that causes an OS null-pointer exception if the content of reg is null.
97 // If the accessed location is M[reg + offset] and the offset is known, provide the
98 // offset. No explicit code generation is needed if the offset is within a certain
99 // range (0 <= offset <= page_size).
100
101 void null_check(Register reg, int offset = -1);
102 static bool needs_explicit_null_check(intptr_t offset);
103 static bool uses_implicit_null_check(void* address);
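
  // Illustrative sketch (assumed usage; 'field_offset' is a hypothetical constant):
  // the macro decides between an implicit check (the subsequent access at
  // M[reg + offset] will fault if reg is null) and an explicit test, based on
  // needs_explicit_null_check(offset).
  //
  //   __ null_check(rbx, field_offset);  // no code emitted if 0 <= field_offset <= page_size
  //   __ null_check(rbx);                // offset unknown: an explicit test is emitted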
104
105 // Required platform-specific helpers for Label::patch_instructions.
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
107 void pd_patch_instruction(address branch, address target, const char* file, int line) {
108 unsigned char op = branch[0];
109 assert(op == 0xE8 /* call */ ||
110 op == 0xE9 /* jmp */ ||
111 op == 0xEB /* short jmp */ ||
112 (op & 0xF0) == 0x70 /* short jcc */ ||
113            (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
114            (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
115 "Invalid opcode at patch point");
116
117 if (op == 0xEB || (op & 0xF0) == 0x70) {
118 // short offset operators (jmp and jcc)
119 char* disp = (char*) &branch[1];
120 int imm8 = checked_cast<int>(target - (address) &disp[1]);
121 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
122 file == nullptr ? "<null>" : file, line);
123 *disp = (char)imm8;
124 } else {
125 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
126 int imm32 = checked_cast<int>(target - (address) &disp[1]);
127 *disp = imm32;
128 }
129 }
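
  // Worked example (x86 encoding facts only, no new behavior): a short jcc such
  // as 0x74 <disp8> is patched by rewriting the single displacement byte at
  // branch[1]; a near jmp 0xE9 <disp32> has its 4-byte displacement at
  // branch[1..4]; the two-byte forms (0x0F 0x8x jcc, 0xC7 0xF8 xbegin) place the
  // 32-bit displacement at branch[2..5]. Displacements are relative to the end
  // of the instruction, which is why &disp[1] is used as the base above.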
130
131 // The following 4 methods return the offset of the appropriate move instruction
132
133 // Support for fast byte/short loading with zero extension (depending on particular CPU)
134 int load_unsigned_byte(Register dst, Address src);
135 int load_unsigned_short(Register dst, Address src);
136
137 // Support for fast byte/short loading with sign extension (depending on particular CPU)
138 int load_signed_byte(Register dst, Address src);
139 int load_signed_short(Register dst, Address src);
140
141 // Support for sign-extension (hi:lo = extend_sign(lo))
142 void extend_sign(Register hi, Register lo);
143
144 // Load and store values by size and signed-ness
145 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
146 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
147
148 // Support for inc/dec with optimal instruction selection depending on value
149
150 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
151 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
152 void increment(Address dst, int value = 1) { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
153 void decrement(Address dst, int value = 1) { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }
154
155 void decrementl(Address dst, int value = 1);
156 void decrementl(Register reg, int value = 1);
157
158 void decrementq(Register reg, int value = 1);
159 void decrementq(Address dst, int value = 1);
160
161 void incrementl(Address dst, int value = 1);
162 void incrementl(Register reg, int value = 1);
163
164 void incrementq(Register reg, int value = 1);
165 void incrementq(Address dst, int value = 1);
166
167 void incrementl(AddressLiteral dst, Register rscratch = noreg);
168 void incrementl(ArrayAddress dst, Register rscratch);
169
170 void incrementq(AddressLiteral dst, Register rscratch = noreg);
171
172 // Support optimal SSE move instructions.
173 void movflt(XMMRegister dst, XMMRegister src) {
174     if (dst->encoding() == src->encoding()) return;
175 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
176 else { movss (dst, src); return; }
177 }
178 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
179 void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
180 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
181
182 // Move with zero extension
183 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
184
185 void movdbl(XMMRegister dst, XMMRegister src) {
186     if (dst->encoding() == src->encoding()) return;
187 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
188 else { movsd (dst, src); return; }
189 }
190
191 void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
192
193 void movdbl(XMMRegister dst, Address src) {
194 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
195 else { movlpd(dst, src); return; }
196 }
197 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
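
  // Background note (general x86 behavior, not specific to one CPU): movaps/movapd
  // write the whole 128-bit register and therefore break any dependency on the
  // previous contents of dst, while movss/movsd only write the low element. When
  // UseXmmRegToRegMoveAll is set, the full-width form is assumed to be at least as
  // cheap, so it is preferred for register-to-register moves.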
198
199 void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
200     // Use a separate tmp XMM register because the caller may
201     // require the src XMM register to be unchanged (as in x86.ad).
202 vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
203 movdl(dst, tmp);
204 movswl(dst, dst);
205 }
206
207 void flt16_to_flt(XMMRegister dst, Register src) {
208 movdl(dst, src);
209 vcvtph2ps(dst, dst, Assembler::AVX_128bit);
210 }
211
212 // Alignment
213 void align32();
214 void align64();
215 void align(int modulus);
216 void align(int modulus, int target);
217
218 void post_call_nop();
219 // A 5 byte nop that is safe for patching (see patch_verified_entry)
220 void fat_nop();
221
222 // Stack frame creation/removal
223 void enter();
224 void leave();
225
226   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
227   // The pointer will be loaded into the thread register.
228 void get_thread(Register thread);
229
230 #ifdef _LP64
231 // Support for argument shuffling
232
233 // bias in bytes
234 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
235 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
236 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
237 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
238 void move_ptr(VMRegPair src, VMRegPair dst);
239 void object_move(OopMap* map,
240 int oop_handle_offset,
241 int framesize_in_slots,
242 VMRegPair src,
243 VMRegPair dst,
244 bool is_receiver,
245 int* receiver_offset);
246 #endif // _LP64
247
248 // Support for VM calls
249 //
250 // It is imperative that all calls into the VM are handled via the call_VM macros.
251   // They make sure that the stack linkage is set up correctly. call_VM's correspond
252 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
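  //
  // Illustrative sketch (the entry point is hypothetical, not an actual runtime
  // routine): the result oop, if any, lands in oop_result, and pending exceptions
  // are checked by default.
  //
  //   __ call_VM(rax,
  //              CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),
  //              rbx /* arg_1 */);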
253
254
255 void call_VM(Register oop_result,
256 address entry_point,
257 bool check_exceptions = true);
258 void call_VM(Register oop_result,
259 address entry_point,
260 Register arg_1,
261 bool check_exceptions = true);
262 void call_VM(Register oop_result,
263 address entry_point,
264 Register arg_1, Register arg_2,
265 bool check_exceptions = true);
266 void call_VM(Register oop_result,
267 address entry_point,
268 Register arg_1, Register arg_2, Register arg_3,
269 bool check_exceptions = true);
270
271 // Overloadings with last_Java_sp
272 void call_VM(Register oop_result,
273 Register last_java_sp,
274 address entry_point,
275 int number_of_arguments = 0,
276 bool check_exceptions = true);
277 void call_VM(Register oop_result,
278 Register last_java_sp,
279 address entry_point,
280                Register arg_1,
281                bool check_exceptions = true);
282 void call_VM(Register oop_result,
283 Register last_java_sp,
284 address entry_point,
285 Register arg_1, Register arg_2,
286 bool check_exceptions = true);
287 void call_VM(Register oop_result,
288 Register last_java_sp,
289 address entry_point,
290 Register arg_1, Register arg_2, Register arg_3,
291 bool check_exceptions = true);
292
293 void get_vm_result (Register oop_result, Register thread);
294 void get_vm_result_2(Register metadata_result, Register thread);
295
296 // These always tightly bind to MacroAssembler::call_VM_base
297 // bypassing the virtual implementation
298 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
299 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
300 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
301 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
302 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
303
304 void call_VM_leaf0(address entry_point);
305 void call_VM_leaf(address entry_point,
306 int number_of_arguments = 0);
307 void call_VM_leaf(address entry_point,
308 Register arg_1);
309 void call_VM_leaf(address entry_point,
310 Register arg_1, Register arg_2);
311 void call_VM_leaf(address entry_point,
312 Register arg_1, Register arg_2, Register arg_3);
313
314 void call_VM_leaf(address entry_point,
315 Register arg_1, Register arg_2, Register arg_3, Register arg_4);
316
317 // These always tightly bind to MacroAssembler::call_VM_leaf_base
318 // bypassing the virtual implementation
319 void super_call_VM_leaf(address entry_point);
320 void super_call_VM_leaf(address entry_point, Register arg_1);
321 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
322 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
323 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
324
325 // last Java Frame (fills frame anchor)
326 void set_last_Java_frame(Register thread,
327 Register last_java_sp,
328 Register last_java_fp,
329 address last_java_pc,
330 Register rscratch);
331
332 // thread in the default location (r15_thread on 64bit)
333 void set_last_Java_frame(Register last_java_sp,
334 Register last_java_fp,
335 address last_java_pc,
336 Register rscratch);
337
338 void reset_last_Java_frame(Register thread, bool clear_fp);
339
340 // thread in the default location (r15_thread on 64bit)
341 void reset_last_Java_frame(bool clear_fp);
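
  // Illustrative sketch (assumed stub-style usage on 64-bit; 'dest' is a
  // hypothetical native entry): the frame anchor brackets the transition out of
  // Java code.
  //
  //   __ set_last_Java_frame(rsp, noreg, nullptr, rscratch1);
  //   __ call(RuntimeAddress(dest));
  //   __ reset_last_Java_frame(true);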
342
343 // jobjects
344 void clear_jobject_tag(Register possibly_non_local);
345 void resolve_jobject(Register value, Register thread, Register tmp);
346 void resolve_global_jobject(Register value, Register thread, Register tmp);
347
348 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
349 void c2bool(Register x);
350
351 // C++ bool manipulation
352
353 void movbool(Register dst, Address src);
354 void movbool(Address dst, bool boolconst);
355 void movbool(Address dst, Register src);
356 void testbool(Register dst);
357
358 void resolve_oop_handle(Register result, Register tmp);
359 void resolve_weak_handle(Register result, Register tmp);
360 void load_mirror(Register mirror, Register method, Register tmp);
361 void load_method_holder_cld(Register rresult, Register rmethod);
362
363 void load_method_holder(Register holder, Register method);
364
365 // oop manipulations
366 #ifdef _LP64
367 void load_nklass_compact(Register dst, Register src);
368 #endif
369 void load_klass(Register dst, Register src, Register tmp);
370 void store_klass(Register dst, Register src, Register tmp);
371
372 // Compares the Klass pointer of an object to a given Klass (which might be narrow,
373 // depending on UseCompressedClassPointers).
374 void cmp_klass(Register klass, Register dst, Register tmp);
375
376 // Compares the Klass pointer of two objects o1 and o2. Result is in the condition flags.
377 // Uses tmp1 and tmp2 as temporary registers.
378 void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2);
379
380 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
381 Register tmp1, Register thread_tmp);
382 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
383 Register tmp1, Register tmp2, Register tmp3);
384
385 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
386 Register thread_tmp = noreg, DecoratorSet decorators = 0);
387 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
388 Register thread_tmp = noreg, DecoratorSet decorators = 0);
389 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
390 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
391
392 // Used for storing null. All other oop constants should be
393 // stored using routines that take a jobject.
394 void store_heap_oop_null(Address dst);
395
396 #ifdef _LP64
397 void store_klass_gap(Register dst, Register src);
398
399 // This dummy is to prevent a call to store_heap_oop from
400 // converting a zero (like null) into a Register by giving
401 // the compiler two choices it can't resolve
402
403 void store_heap_oop(Address dst, void* dummy);
404
405 void encode_heap_oop(Register r);
406 void decode_heap_oop(Register r);
407 void encode_heap_oop_not_null(Register r);
408 void decode_heap_oop_not_null(Register r);
409 void encode_heap_oop_not_null(Register dst, Register src);
410 void decode_heap_oop_not_null(Register dst, Register src);
411
412 void set_narrow_oop(Register dst, jobject obj);
413 void set_narrow_oop(Address dst, jobject obj);
414 void cmp_narrow_oop(Register dst, jobject obj);
415 void cmp_narrow_oop(Address dst, jobject obj);
416
417 void encode_klass_not_null(Register r, Register tmp);
418 void decode_klass_not_null(Register r, Register tmp);
419 void encode_and_move_klass_not_null(Register dst, Register src);
420 void decode_and_move_klass_not_null(Register dst, Register src);
421 void set_narrow_klass(Register dst, Klass* k);
422 void set_narrow_klass(Address dst, Klass* k);
423 void cmp_narrow_klass(Register dst, Klass* k);
424 void cmp_narrow_klass(Address dst, Klass* k);
425
426   // If the heap base register is used, reinitialize it with the correct value.
427 void reinit_heapbase();
428
429 DEBUG_ONLY(void verify_heapbase(const char* msg);)
430
431 #endif // _LP64
432
433 // Int division/remainder for Java
434   // (as idivl, but checks for the special case described in the JVM spec)
435 // returns idivl instruction offset for implicit exception handling
436 int corrected_idivl(Register reg);
437
438 // Long division/remainder for Java
439   // (as idivq, but checks for the special case described in the JVM spec)
440 // returns idivq instruction offset for implicit exception handling
441 int corrected_idivq(Register reg);
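
  // Worked example of the special case: min_jint / -1 overflows (the true quotient
  // 2^31 is not representable), and a plain idivl would raise #DE. The corrected
  // routine detects this combination and yields the result the JVM spec requires,
  // quotient min_jint and remainder 0, without trapping. corrected_idivq handles
  // min_jlong / -1 analogously.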
442
443 void int3();
444
445 // Long operation macros for a 32bit cpu
446 // Long negation for Java
447 void lneg(Register hi, Register lo);
448
449 // Long multiplication for Java
450 // (destroys contents of eax, ebx, ecx and edx)
451 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
452
453 // Long shifts for Java
454 // (semantics as described in JVM spec.)
455 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
456 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
457
458 // Long compare for Java
459 // (semantics as described in JVM spec.)
460 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
461
462
463 // misc
464
465 // Sign extension
466 void sign_extend_short(Register reg);
467 void sign_extend_byte(Register reg);
468
469 // Division by power of 2, rounding towards 0
470 void division_with_shift(Register reg, int shift_value);
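
  // Background sketch (the usual two's-complement correction, assumed here): an
  // arithmetic right shift alone rounds toward negative infinity, so a negative
  // dividend is first biased by (2^shift_value - 1) to make the result round
  // toward zero:
  //
  //   if (reg < 0) reg += (1 << shift_value) - 1;
  //   reg >>= shift_value;   // arithmetic shift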
471
472 #ifndef _LP64
473 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
474 //
475 // CF (corresponds to C0) if x < y
476 // PF (corresponds to C2) if unordered
477 // ZF (corresponds to C3) if x = y
478 //
479 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
480 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
481 void fcmp(Register tmp);
482 // Variant of the above which allows y to be further down the stack
483 // and which only pops x and y if specified. If pop_right is
484 // specified then pop_left must also be specified.
485 void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
486
487 // Floating-point comparison for Java
488 // Compares the top-most stack entries on the FPU stack and stores the result in dst.
489 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
490 // (semantics as described in JVM spec.)
491 void fcmp2int(Register dst, bool unordered_is_less);
492 // Variant of the above which allows y to be further down the stack
493 // and which only pops x and y if specified. If pop_right is
494 // specified then pop_left must also be specified.
495 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
496
497 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
498 // tmp is a temporary register, if none is available use noreg
499 void fremr(Register tmp);
500
501 // only if +VerifyFPU
502 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
503 #endif // !LP64
504
505 // dst = c = a * b + c
506 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
507 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
508
509 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
510 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
511 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
512 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
513
514
515 // same as fcmp2int, but using SSE2
516 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
517 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
518
519 // branch to L if FPU flag C2 is set/not set
520 // tmp is a temporary register, if none is available use noreg
521 void jC2 (Register tmp, Label& L);
522 void jnC2(Register tmp, Label& L);
523
524 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
525 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
526 void load_float(Address src);
527
528 // Store float value to 'address'. If UseSSE >= 1, the value is stored
529 // from register xmm0. Otherwise, the value is stored from the FPU stack.
530 void store_float(Address dst);
531
532 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
533 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
534 void load_double(Address src);
535
536 // Store double value to 'address'. If UseSSE >= 2, the value is stored
537 // from register xmm0. Otherwise, the value is stored from the FPU stack.
538 void store_double(Address dst);
539
540 #ifndef _LP64
541 // Pop ST (ffree & fincstp combined)
542 void fpop();
543
544 void empty_FPU_stack();
545 #endif // !_LP64
546
547 void push_IU_state();
548 void pop_IU_state();
549
550 void push_FPU_state();
551 void pop_FPU_state();
552
553 void push_CPU_state();
554 void pop_CPU_state();
555
556 void push_cont_fastpath();
557 void pop_cont_fastpath();
558
559 void inc_held_monitor_count();
560 void dec_held_monitor_count();
561
562 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
563
564   // Round reg up to a multiple of modulus (modulus must be a power of two)
565 void round_to(Register reg, int modulus);
566
567 private:
568   // General purpose and XMM registers potentially clobbered by native code; there
569   // is no need for FPU or AVX opmask related methods because in C1/interpreter code
570   // - we always save/restore the FPU state as a whole
571   // - we do not care about the AVX-512 opmask registers
572 static RegSet call_clobbered_gp_registers();
573 static XMMRegSet call_clobbered_xmm_registers();
574
575 void push_set(XMMRegSet set, int offset);
576 void pop_set(XMMRegSet set, int offset);
577
578 public:
579 void push_set(RegSet set, int offset = -1);
580 void pop_set(RegSet set, int offset = -1);
581
582 // Push and pop everything that might be clobbered by a native
583 // runtime call.
584 // Only save the lower 64 bits of each vector register.
585 // Additional registers can be excluded in a passed RegSet.
586 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
587 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
588
589 void push_call_clobbered_registers(bool save_fpu = true) {
590 push_call_clobbered_registers_except(RegSet(), save_fpu);
591 }
592 void pop_call_clobbered_registers(bool restore_fpu = true) {
593 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
594 }
595
596 // allocation
597 void tlab_allocate(
598 Register thread, // Current thread
599 Register obj, // result: pointer to object after successful allocation
600 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
601 int con_size_in_bytes, // object size in bytes if known at compile time
602 Register t1, // temp register
603 Register t2, // temp register
604 Label& slow_case // continuation point if fast allocation fails
605 );
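
  // Illustrative sketch (assumed usage with a compile-time-constant size; register
  // choices and 'instance_size_in_bytes' are arbitrary/hypothetical):
  //
  //   Label slow;
  //   __ tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes, rbx, rcx, slow);
  //   // ... initialize the new object in rax, then jump past the slow path ...
  //   __ bind(slow);
  //   // ... call the runtime allocator ...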
606 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
607
608 // interface method calling
609 void lookup_interface_method(Register recv_klass,
610 Register intf_klass,
611 RegisterOrConstant itable_index,
612 Register method_result,
613 Register scan_temp,
614 Label& no_such_interface,
615 bool return_method = true);
616
617 void lookup_interface_method_stub(Register recv_klass,
618 Register holder_klass,
619 Register resolved_klass,
620 Register method_result,
621 Register scan_temp,
622 Register temp_reg2,
623 Register receiver,
624 int itable_index,
625 Label& L_no_such_interface);
626
627 // virtual method calling
628 void lookup_virtual_method(Register recv_klass,
629 RegisterOrConstant vtable_index,
630 Register method_result);
631
632 // Test sub_klass against super_klass, with fast and slow paths.
633
634 // The fast path produces a tri-state answer: yes / no / maybe-slow.
635 // One of the three labels can be null, meaning take the fall-through.
636 // If super_check_offset is -1, the value is loaded up from super_klass.
637 // No registers are killed, except temp_reg.
638 void check_klass_subtype_fast_path(Register sub_klass,
639 Register super_klass,
640 Register temp_reg,
641 Label* L_success,
642 Label* L_failure,
643 Label* L_slow_path,
644 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
645
646 // The rest of the type check; must be wired to a corresponding fast path.
647 // It does not repeat the fast path logic, so don't use it standalone.
648 // The temp_reg and temp2_reg can be noreg, if no temps are available.
649 // Updates the sub's secondary super cache as necessary.
650 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
651 void check_klass_subtype_slow_path(Register sub_klass,
652 Register super_klass,
653 Register temp_reg,
654 Register temp2_reg,
655 Label* L_success,
656 Label* L_failure,
657 bool set_cond_codes = false);
658
659 // Simplified, combined version, good for typical uses.
660 // Falls through on failure.
661 void check_klass_subtype(Register sub_klass,
662 Register super_klass,
663 Register temp_reg,
664 Label& L_success);
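
  // Illustrative sketch (register choices are arbitrary): wiring the fast path to
  // the slow path; passing nullptr for a label means "fall through".
  //
  //   Label ok, fail;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &ok, &fail, nullptr); // falls through to slow path
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, noreg, &ok, nullptr); // falls through on failure
  //   __ bind(fail);
  //   // ... not a subtype ...
  //   __ bind(ok);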
665
666 void clinit_barrier(Register klass,
667 Register thread,
668 Label* L_fast_path = nullptr,
669 Label* L_slow_path = nullptr);
670
671 // method handles (JSR 292)
672 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
673
674 // Debugging
675
676 // only if +VerifyOops
677 void _verify_oop(Register reg, const char* s, const char* file, int line);
678 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
679
680 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
681 if (VerifyOops) {
682 _verify_oop(reg, s, file, line);
683 }
684 }
685 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
686 if (VerifyOops) {
687 _verify_oop_addr(reg, s, file, line);
688 }
689 }
690
691 // TODO: verify method and klass metadata (compare against vptr?)
692 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
693 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
694
695 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
696 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
697 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
698 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
699 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
700
701 // Verify or restore cpu control state after JNI call
702 void restore_cpu_control_state_after_jni(Register rscratch);
703
704 // prints msg, dumps registers and stops execution
705 void stop(const char* msg);
706
707 // prints msg and continues
708 void warn(const char* msg);
709
710 // dumps registers and other state
711 void print_state();
712
713 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
714 static void debug64(char* msg, int64_t pc, int64_t regs[]);
715 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
716 static void print_state64(int64_t pc, int64_t regs[]);
717
718 void os_breakpoint();
719
720 void untested() { stop("untested"); }
721
722 void unimplemented(const char* what = "");
723
724 void should_not_reach_here() { stop("should not reach here"); }
725
726 void print_CPU_state();
727
728 // Stack overflow checking
729 void bang_stack_with_offset(int offset) {
730     // Stack grows down: the caller passes a positive offset, which is negated below
731 assert(offset > 0, "must bang with negative offset");
732 movl(Address(rsp, (-offset)), rax);
733 }
734
735   // Writes to successive stack pages until the given offset is reached, to check for
736   // stack overflow and the shadow pages. Also clobbers tmp.
737 void bang_stack_size(Register size, Register tmp);
738
739 // Check for reserved stack access in method being exited (for JIT)
740 void reserved_stack_check();
741
742 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
743
744 void verify_tlab();
745
746 static Condition negate_condition(Condition cond);
747
748   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
749   // operands. In general the names are modified to avoid hiding the instruction in Assembler
750   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
751   // here in MacroAssembler. The major exception to this rule is call.
752
753 // Arithmetics
754
755
756 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
757 void addptr(Address dst, Register src);
758
759 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
760 void addptr(Register dst, int32_t src);
761 void addptr(Register dst, Register src);
762 void addptr(Register dst, RegisterOrConstant src) {
763 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
764 else addptr(dst, src.as_register());
765 }
766
767 void andptr(Register dst, int32_t src);
768 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
769
770 #ifdef _LP64
771 using Assembler::andq;
772 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
773 #endif
774
775 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
776
777 // renamed to drag out the casting of address to int32_t/intptr_t
778 void cmp32(Register src1, int32_t imm);
779
780 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
781 // compare reg - mem, or reg - &mem
782 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
783
784 void cmp32(Register src1, Address src2);
785
786 #ifndef _LP64
787 void cmpklass(Address dst, Metadata* obj);
788 void cmpklass(Register dst, Metadata* obj);
789 void cmpoop(Address dst, jobject obj);
790 #endif // _LP64
791
792 void cmpoop(Register src1, Register src2);
793 void cmpoop(Register src1, Address src2);
794 void cmpoop(Register dst, jobject obj, Register rscratch);
795
796   // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
797 void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
798
799 void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
800
801 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
802 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
803 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
804
805 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
806 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
807
808   // cmp64 to avoid hiding cmpq
809 void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
810
811 void cmpxchgptr(Register reg, Address adr);
812
813 void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
814
815 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
816 void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
817
818
819 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
820
821 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
822
823 void shlptr(Register dst, int32_t shift);
824 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
825
826 void shrptr(Register dst, int32_t shift);
827 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
828
829 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
830 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
831
832 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
833
834 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
835 void subptr(Register dst, int32_t src);
836   // Force generation of a 4 byte immediate value even if it fits into 8 bits
837 void subptr_imm32(Register dst, int32_t src);
838 void subptr(Register dst, Register src);
839 void subptr(Register dst, RegisterOrConstant src) {
840 if (src.is_constant()) subptr(dst, (int) src.as_constant());
841 else subptr(dst, src.as_register());
842 }
843
844 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
845 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
846
847 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
848 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
849
850 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
851
852
853
854 // Helper functions for statistics gathering.
855 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
856 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
857 // Unconditional atomic increment.
858 void atomic_incl(Address counter_addr);
859 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
860 #ifdef _LP64
861 void atomic_incq(Address counter_addr);
862 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
863 #endif
864 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
865 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
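
  // Illustrative sketch ('some_counter' is a hypothetical global): bump a statistic
  // only when the preceding compare succeeded, without disturbing the flags.
  //
  //   __ cond_inc32(Assembler::equal, ExternalAddress((address)&some_counter));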
866
867 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
868 void lea(Register dst, AddressLiteral adr);
869 void lea(Address dst, AddressLiteral adr, Register rscratch);
870
871 void leal32(Register dst, Address src) { leal(dst, src); }
872
873 // Import other testl() methods from the parent class or else
874 // they will be hidden by the following overriding declaration.
875 using Assembler::testl;
876 void testl(Address dst, int32_t imm32);
877 void testl(Register dst, int32_t imm32);
878 void testl(Register dst, AddressLiteral src); // requires reachable address
879 using Assembler::testq;
880 void testq(Address dst, int32_t imm32);
881 void testq(Register dst, int32_t imm32);
882
883 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
884 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
885 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
886 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
887
888 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
889 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
890 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
891 void testptr(Register src1, Register src2);
892
893 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
894 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
895
896 // Calls
897
898 void call(Label& L, relocInfo::relocType rtype);
899 void call(Register entry);
900 void call(Address addr) { Assembler::call(addr); }
901
902   // NOTE: this call transfers to the effective address of entry NOT
903   // the address contained by entry. This is because it is more natural
904   // for jumps/calls.
905 void call(AddressLiteral entry, Register rscratch = rax);
906
907 // Emit the CompiledIC call idiom
908 void ic_call(address entry, jint method_index = 0);
909
910 void emit_static_call_stub();
911
912 // Jumps
913
914   // NOTE: these jumps transfer to the effective address of dst NOT
915   // the address contained by dst. This is because it is more natural
916   // for jumps/calls.
917 void jump(AddressLiteral dst, Register rscratch = noreg);
918
919 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
920
921 // 32bit can do a case table jump in one instruction but we no longer allow the base
922 // to be installed in the Address class. This jump will transfer to the address
923 // contained in the location described by entry (not the address of entry)
924 void jump(ArrayAddress entry, Register rscratch);
925
926 // Floating
927
928 void push_f(XMMRegister r);
929 void pop_f(XMMRegister r);
930 void push_d(XMMRegister r);
931 void pop_d(XMMRegister r);
932
933 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
934 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
935 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
936
937 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
938 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
939 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
940
941 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
942 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
943 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
944
945 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
946 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
947 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
948
949 #ifndef _LP64
950 void fadd_s(Address src) { Assembler::fadd_s(src); }
951 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
952
953 void fldcw(Address src) { Assembler::fldcw(src); }
954 void fldcw(AddressLiteral src);
955
956 void fld_s(int index) { Assembler::fld_s(index); }
957 void fld_s(Address src) { Assembler::fld_s(src); }
958 void fld_s(AddressLiteral src);
959
960 void fld_d(Address src) { Assembler::fld_d(src); }
961 void fld_d(AddressLiteral src);
962
963 void fld_x(Address src) { Assembler::fld_x(src); }
964 void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }
965
966 void fmul_s(Address src) { Assembler::fmul_s(src); }
967 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
968 #endif // !_LP64
969
970 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
971 void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
972
973 #ifdef _LP64
974 private:
975 void sha256_AVX2_one_round_compute(
976 Register reg_old_h,
977 Register reg_a,
978 Register reg_b,
979 Register reg_c,
980 Register reg_d,
981 Register reg_e,
982 Register reg_f,
983 Register reg_g,
984 Register reg_h,
985 int iter);
986 void sha256_AVX2_four_rounds_compute_first(int start);
987 void sha256_AVX2_four_rounds_compute_last(int start);
988 void sha256_AVX2_one_round_and_sched(
989 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
990 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
991 XMMRegister xmm_2, /* ymm6 */
992 XMMRegister xmm_3, /* ymm7 */
993 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
994 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
995 Register reg_c, /* edi */
996 Register reg_d, /* esi */
997 Register reg_e, /* r8d */
998 Register reg_f, /* r9d */
999 Register reg_g, /* r10d */
1000 Register reg_h, /* r11d */
1001 int iter);
1002
1003 void addm(int disp, Register r1, Register r2);
1004
1005 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1006 Register e, Register f, Register g, Register h, int iteration);
1007
1008 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1009 Register a, Register b, Register c, Register d, Register e, Register f,
1010 Register g, Register h, int iteration);
1011
1012 void addmq(int disp, Register r1, Register r2);
1013 public:
1014 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1015 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1016 Register buf, Register state, Register ofs, Register limit, Register rsp,
1017 bool multi_block, XMMRegister shuf_mask);
1018 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1019 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1020 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1021 XMMRegister shuf_mask);
1022 #endif // _LP64
1023
1024 void fast_md5(Register buf, Address state, Address ofs, Address limit,
1025 bool multi_block);
1026
1027 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1028 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1029 Register buf, Register state, Register ofs, Register limit, Register rsp,
1030 bool multi_block);
1031
1032 #ifdef _LP64
1033 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1034 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1035 Register buf, Register state, Register ofs, Register limit, Register rsp,
1036 bool multi_block, XMMRegister shuf_mask);
1037 #else
1038 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1039 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1040 Register buf, Register state, Register ofs, Register limit, Register rsp,
1041 bool multi_block);
1042 #endif
1043
1044 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1045 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1046 Register rax, Register rcx, Register rdx, Register tmp);
1047
1048 #ifndef _LP64
1049 private:
1050 // Initialized in macroAssembler_x86_constants.cpp
1051 static address ONES;
1052 static address L_2IL0FLOATPACKET_0;
1053 static address PI4_INV;
1054 static address PI4X3;
1055 static address PI4X4;
1056
1057 public:
1058 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1059 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1060 Register rax, Register rcx, Register rdx, Register tmp1);
1061
1062 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1063 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1064 Register rax, Register rcx, Register rdx, Register tmp);
1065
1066 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1067 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1068 Register rdx, Register tmp);
1069
1070 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1071 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1072 Register rax, Register rbx, Register rdx);
1073
1074 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1075 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1076 Register rax, Register rcx, Register rdx, Register tmp);
1077
1078 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1079 Register edx, Register ebx, Register esi, Register edi,
1080 Register ebp, Register esp);
1081
1082 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1083 Register esi, Register edi, Register ebp, Register esp);
1084
1085 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1086 Register edx, Register ebx, Register esi, Register edi,
1087 Register ebp, Register esp);
1088
1089 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1090 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1091 Register rax, Register rcx, Register rdx, Register tmp);
1092 #endif // !_LP64
1093
1094 private:
1095
1096 // these are private because users should be doing movflt/movdbl
1097
1098 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1099 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1100 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1101 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1102
1103 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1104 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1105
1106 public:
1107
1108 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1109 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1110 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1111
1112 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1113 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1114 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1115
1116 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1117 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1118 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1119
1120 using Assembler::vbroadcastsd;
1121 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1122
1123 using Assembler::vbroadcastss;
1124 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1125
1126 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1127 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1128 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1129
1130 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1131 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1132 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1133
1134 // Move Unaligned Double Quadword
1135 void movdqu(Address dst, XMMRegister src);
1136 void movdqu(XMMRegister dst, XMMRegister src);
1137 void movdqu(XMMRegister dst, Address src);
1138 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1139
1140 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1141 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1142 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1143 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1144 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1145 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1146
1147 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1148 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1149 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1150 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1151 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1152 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1153
1154   // Safe move operation: lowers to 16-bit moves on targets that support only the
1155   // AVX512F feature, and to 64-bit moves on targets that also support AVX512BW.
1156 void kmov(Address dst, KRegister src);
1157 void kmov(KRegister dst, Address src);
1158 void kmov(KRegister dst, KRegister src);
1159 void kmov(Register dst, KRegister src);
1160 void kmov(KRegister dst, Register src);
1161
1162 using Assembler::movddup;
1163 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1164
1165 using Assembler::vmovddup;
1166 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1167
1168 // AVX Unaligned forms
1169 void vmovdqu(Address dst, XMMRegister src);
1170 void vmovdqu(XMMRegister dst, Address src);
1171 void vmovdqu(XMMRegister dst, XMMRegister src);
1172 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1173 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1174
1175 // AVX512 Unaligned
1176 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1177 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1178
1179 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1180 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1181
1182 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1183 if (dst->encoding() != src->encoding() || mask != k0) {
1184 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1185 }
1186 }
1187 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1188 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1189 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1190
1191 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1192 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1193
1194 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1195 if (dst->encoding() != src->encoding() || mask != k0) {
1196 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1197 }
1198 }
1199 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1200 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1201 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1202
1203 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1204 if (dst->encoding() != src->encoding()) {
1205 Assembler::evmovdqul(dst, src, vector_len);
1206 }
1207 }
1208 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1209 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1210
1211 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1212 if (dst->encoding() != src->encoding() || mask != k0) {
1213 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1214 }
1215 }
1216 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1217 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1218 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1219
1220 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1221 if (dst->encoding() != src->encoding()) {
1222 Assembler::evmovdquq(dst, src, vector_len);
1223 }
1224 }
1225 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1226 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1227 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1228
1229 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1230 if (dst->encoding() != src->encoding() || mask != k0) {
1231 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1232 }
1233 }
1234 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1235 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1236 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1237
1238 // Move Aligned Double Quadword
1239 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1240 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1241 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1242
1243 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1244 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1245 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1246 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1247
1248 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1249 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1250 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1251
1252 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1253 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1254 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1255
1256 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1257 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1258 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1259
1260 // Carry-Less Multiplication Quadword
1261 void pclmulldq(XMMRegister dst, XMMRegister src) {
1262 // 0x00 - multiply lower 64 bits [0:63]
1263 Assembler::pclmulqdq(dst, src, 0x00);
1264 }
1265 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1266 // 0x11 - multiply upper 64 bits [64:127]
1267 Assembler::pclmulqdq(dst, src, 0x11);
1268 }
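
  // In the PCLMULQDQ immediate, bit 0 selects the qword of the first operand and
  // bit 4 selects the qword of the second operand, so 0x00 multiplies the two
  // low qwords and 0x11 the two high qwords (see also the vpclmul* helpers below).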
1269
1270 void pcmpeqb(XMMRegister dst, XMMRegister src);
1271 void pcmpeqw(XMMRegister dst, XMMRegister src);
1272
1273 void pcmpestri(XMMRegister dst, Address src, int imm8);
1274 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1275
1276 void pmovzxbw(XMMRegister dst, XMMRegister src);
1277 void pmovzxbw(XMMRegister dst, Address src);
1278
1279 void pmovmskb(Register dst, XMMRegister src);
1280
1281 void ptest(XMMRegister dst, XMMRegister src);
1282
1283 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1284 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1285 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1286
1287 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1288 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1289 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1290
1291 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1292 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1293 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1294
1295 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1296 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1297 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1298
1299 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1300 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1301 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1302
1303 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1304 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1305 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1306
1307 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1308 void xorpd(XMMRegister dst, XMMRegister src);
1309 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1310 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1311
1312 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1313 void xorps(XMMRegister dst, XMMRegister src);
1314 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1315 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1316
1317 // Shuffle Bytes
1318 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1319 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1320 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  // AVX 3-operand instructions
1322
1323 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1324 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1325 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1326
1327 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1328 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1329 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1330
1331 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1332 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1333
1334 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1335 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1336 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1337
1338 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1339 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1340
1341 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1342 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1343 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1344
1345 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1346 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1347 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1348
1349 using Assembler::vpbroadcastd;
1350 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1351
1352 using Assembler::vpbroadcastq;
1353 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1354
1355 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1356
1357 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1358 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1359
1360 // Vector compares
1361 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1362 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1363 }
1364 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1365
1366 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1367 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1368 }
1369 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1370
1371 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1372 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1373 }
1374 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1375
1376 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1377 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1378 }
1379 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1380
1381 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1382
1383 // Emit comparison instruction for the specified comparison predicate.
1384 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1385 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1386
1387 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1388 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1389
1390 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1391
1392 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1393 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1394
1395 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1396 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1397 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1398
1399 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1400 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1401
1402 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1403 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1404
1405 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1406 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1407
1408 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1409 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1410
1411 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1412 if (!is_varshift) {
1413 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1414 } else {
1415 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1416 }
1417 }
1418 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1419 if (!is_varshift) {
1420 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1421 } else {
1422 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1423 }
1424 }
1425 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1426 if (!is_varshift) {
1427 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1428 } else {
1429 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1430 }
1431 }
1432 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1433 if (!is_varshift) {
1434 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1435 } else {
1436 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1437 }
1438 }
1439 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1440 if (!is_varshift) {
1441 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1442 } else {
1443 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1444 }
1445 }
1446 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1447 if (!is_varshift) {
1448 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1449 } else {
1450 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1451 }
1452 }
1453 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1454 if (!is_varshift) {
1455 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1456 } else {
1457 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1458 }
1459 }
1460 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1461 if (!is_varshift) {
1462 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1463 } else {
1464 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1465 }
1466 }
1467 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1468 if (!is_varshift) {
1469 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1470 } else {
1471 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1472 }
1473 }
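
  // The evpsll/evpsrl/evpsra wrappers above choose between the uniform-count
  // shift (count taken from the low qword of 'src') and the AVX-512 variable
  // shift (independent per-element counts in 'src') based on 'is_varshift'.
  // Illustrative sketch (the register choices are hypothetical):
  //
  //   __ evpslld(xmm0, k0, xmm1, xmm2, /*merge*/ false,
  //              Assembler::AVX_512bit, /*is_varshift*/ true);  // dispatches to evpsllvd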
1474
1475 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1476 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1477 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1478 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1479
1480 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1481 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1482
1483 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1484 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1485
1486 void vptest(XMMRegister dst, XMMRegister src);
1487 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1488
1489 void punpcklbw(XMMRegister dst, XMMRegister src);
1490 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1491
1492 void pshufd(XMMRegister dst, Address src, int mode);
1493 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1494
1495 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1496 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1497
1498 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1499 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1500 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1501
1502 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1503 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1504 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1505
1506 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1507
1508 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1509 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1510 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1511
1512 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1513 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1514 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1515
1516 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1517 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1518 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1519
1520 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1521 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1522 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1523
1524 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1525 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1526 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1527
1528 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1529 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1530 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1531
1532 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1533 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1534
1535 // AVX Vector instructions
1536
1537 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1538 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1539 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1540
1541 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1542 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1543 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1544
1545 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1547 Assembler::vpxor(dst, nds, src, vector_len);
1548 else
1549 Assembler::vxorpd(dst, nds, src, vector_len);
1550 }
1551 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1553 Assembler::vpxor(dst, nds, src, vector_len);
1554 else
1555 Assembler::vxorpd(dst, nds, src, vector_len);
1556 }
1557 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
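
  // On AVX1-only hardware vxorpd is a safe substitute for a 256-bit vpxor: it
  // produces the identical bit pattern, at the cost of a possible
  // integer/floating-point domain-crossing delay on some microarchitectures.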
1558
1559 // Simple version for AVX2 256bit vectors
1560 void vpxor(XMMRegister dst, XMMRegister src) {
1561 assert(UseAVX >= 2, "Should be at least AVX2");
1562 Assembler::vpxor(dst, dst, src, AVX_256bit);
1563 }
1564 void vpxor(XMMRegister dst, Address src) {
1565 assert(UseAVX >= 2, "Should be at least AVX2");
1566 Assembler::vpxor(dst, dst, src, AVX_256bit);
1567 }
1568
1569 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1570 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1571
1572 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1573 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1574 Assembler::vinserti32x4(dst, nds, src, imm8);
1575 } else if (UseAVX > 1) {
1576 // vinserti128 is available only in AVX2
1577 Assembler::vinserti128(dst, nds, src, imm8);
1578 } else {
1579 Assembler::vinsertf128(dst, nds, src, imm8);
1580 }
1581 }
1582
1583 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1584 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1585 Assembler::vinserti32x4(dst, nds, src, imm8);
1586 } else if (UseAVX > 1) {
1587 // vinserti128 is available only in AVX2
1588 Assembler::vinserti128(dst, nds, src, imm8);
1589 } else {
1590 Assembler::vinsertf128(dst, nds, src, imm8);
1591 }
1592 }
1593
1594 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1595 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1596 Assembler::vextracti32x4(dst, src, imm8);
1597 } else if (UseAVX > 1) {
1598 // vextracti128 is available only in AVX2
1599 Assembler::vextracti128(dst, src, imm8);
1600 } else {
1601 Assembler::vextractf128(dst, src, imm8);
1602 }
1603 }
1604
1605 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1606 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1607 Assembler::vextracti32x4(dst, src, imm8);
1608 } else if (UseAVX > 1) {
1609 // vextracti128 is available only in AVX2
1610 Assembler::vextracti128(dst, src, imm8);
1611 } else {
1612 Assembler::vextractf128(dst, src, imm8);
1613 }
1614 }
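
  // The dispatch above (and in the _high/_low helpers below) reflects two
  // encoding limits: vinserti128/vextracti128 require AVX2, and the VEX encoding
  // cannot reach registers xmm16-xmm31, so AVX-512 targets without AVX512VL use
  // the EVEX vinserti32x4/vextracti32x4 forms instead, while pre-AVX2 hardware
  // falls back to the floating-point vinsertf128/vextractf128 encodings.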
1615
1616 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1617 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1618 vinserti128(dst, dst, src, 1);
1619 }
1620 void vinserti128_high(XMMRegister dst, Address src) {
1621 vinserti128(dst, dst, src, 1);
1622 }
1623 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1624 vextracti128(dst, src, 1);
1625 }
1626 void vextracti128_high(Address dst, XMMRegister src) {
1627 vextracti128(dst, src, 1);
1628 }
1629
1630 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1631 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1632 Assembler::vinsertf32x4(dst, dst, src, 1);
1633 } else {
1634 Assembler::vinsertf128(dst, dst, src, 1);
1635 }
1636 }
1637
1638 void vinsertf128_high(XMMRegister dst, Address src) {
1639 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1640 Assembler::vinsertf32x4(dst, dst, src, 1);
1641 } else {
1642 Assembler::vinsertf128(dst, dst, src, 1);
1643 }
1644 }
1645
1646 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1647 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1648 Assembler::vextractf32x4(dst, src, 1);
1649 } else {
1650 Assembler::vextractf128(dst, src, 1);
1651 }
1652 }
1653
1654 void vextractf128_high(Address dst, XMMRegister src) {
1655 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1656 Assembler::vextractf32x4(dst, src, 1);
1657 } else {
1658 Assembler::vextractf128(dst, src, 1);
1659 }
1660 }
1661
1662 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1663 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1664 Assembler::vinserti64x4(dst, dst, src, 1);
1665 }
1666 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1667 Assembler::vinsertf64x4(dst, dst, src, 1);
1668 }
1669 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1670 Assembler::vextracti64x4(dst, src, 1);
1671 }
1672 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1673 Assembler::vextractf64x4(dst, src, 1);
1674 }
1675 void vextractf64x4_high(Address dst, XMMRegister src) {
1676 Assembler::vextractf64x4(dst, src, 1);
1677 }
1678 void vinsertf64x4_high(XMMRegister dst, Address src) {
1679 Assembler::vinsertf64x4(dst, dst, src, 1);
1680 }
1681
1682 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1683 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1684 vinserti128(dst, dst, src, 0);
1685 }
1686 void vinserti128_low(XMMRegister dst, Address src) {
1687 vinserti128(dst, dst, src, 0);
1688 }
1689 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1690 vextracti128(dst, src, 0);
1691 }
1692 void vextracti128_low(Address dst, XMMRegister src) {
1693 vextracti128(dst, src, 0);
1694 }
1695
1696 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1697 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1698 Assembler::vinsertf32x4(dst, dst, src, 0);
1699 } else {
1700 Assembler::vinsertf128(dst, dst, src, 0);
1701 }
1702 }
1703
1704 void vinsertf128_low(XMMRegister dst, Address src) {
1705 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1706 Assembler::vinsertf32x4(dst, dst, src, 0);
1707 } else {
1708 Assembler::vinsertf128(dst, dst, src, 0);
1709 }
1710 }
1711
1712 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1713 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1714 Assembler::vextractf32x4(dst, src, 0);
1715 } else {
1716 Assembler::vextractf128(dst, src, 0);
1717 }
1718 }
1719
1720 void vextractf128_low(Address dst, XMMRegister src) {
1721 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1722 Assembler::vextractf32x4(dst, src, 0);
1723 } else {
1724 Assembler::vextractf128(dst, src, 0);
1725 }
1726 }
1727
1728 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1729 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1730 Assembler::vinserti64x4(dst, dst, src, 0);
1731 }
1732 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1733 Assembler::vinsertf64x4(dst, dst, src, 0);
1734 }
1735 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1736 Assembler::vextracti64x4(dst, src, 0);
1737 }
1738 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1739 Assembler::vextractf64x4(dst, src, 0);
1740 }
1741 void vextractf64x4_low(Address dst, XMMRegister src) {
1742 Assembler::vextractf64x4(dst, src, 0);
1743 }
1744 void vinsertf64x4_low(XMMRegister dst, Address src) {
1745 Assembler::vinsertf64x4(dst, dst, src, 0);
1746 }
1747
1748 // Carry-Less Multiplication Quadword
1749 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1750 // 0x00 - multiply lower 64 bits [0:63]
1751 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1752 }
1753 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1754 // 0x11 - multiply upper 64 bits [64:127]
1755 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1756 }
1757 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1758 // 0x10 - multiply nds[0:63] and src[64:127]
1759 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1760 }
1761 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
1763 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1764 }
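
  // The full carry-less product of two 128-bit values can be assembled from
  // these four helpers, e.g. (illustrative sketch, the register choices are
  // hypothetical):
  //
  //   __ vpclmulldq(xmm0, xmm1, xmm2);     // lo(x) * lo(y)
  //   __ vpclmulhdq(xmm3, xmm1, xmm2);     // hi(x) * hi(y)
  //   __ vpclmullqhqdq(xmm4, xmm1, xmm2);  // lo(x) * hi(y)
  //   __ vpclmulhqlqdq(xmm5, xmm1, xmm2);  // hi(x) * lo(y)
  //
  // with the two cross terms xor-ed together and folded into the middle 128 bits
  // of the 256-bit result.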
1765
1766 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1767 // 0x00 - multiply lower 64 bits [0:63]
1768 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1769 }
1770 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1771 // 0x11 - multiply upper 64 bits [64:127]
1772 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1773 }
1774
1775 // AVX-512 mask operations.
1776 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1777 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1778 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1779 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1780 void kortest(uint masklen, KRegister src1, KRegister src2);
1781 void ktest(uint masklen, KRegister src1, KRegister src2);
1782
1783 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1784 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1785
1786 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1787 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1788
1789 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1790 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1791
1792 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1793 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1794
1795 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1796 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1797 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1798 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1799
1800 using Assembler::evpandq;
1801 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1802
1803 using Assembler::evpaddq;
1804 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1805
1806 using Assembler::evporq;
1807 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1808
1809 using Assembler::vpternlogq;
1810 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1811
1812 void cmov32( Condition cc, Register dst, Address src);
1813 void cmov32( Condition cc, Register dst, Register src);
1814
1815 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1816
1817 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1818 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1819
1820 void movoop(Register dst, jobject obj);
1821 void movoop(Address dst, jobject obj, Register rscratch);
1822
1823 void mov_metadata(Register dst, Metadata* obj);
1824 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1825
1826 void movptr(Register dst, Register src);
1827 void movptr(Register dst, Address src);
1828 void movptr(Register dst, AddressLiteral src);
1829 void movptr(Register dst, ArrayAddress src);
1830 void movptr(Register dst, intptr_t src);
1831 void movptr(Address dst, Register src);
1832 void movptr(Address dst, int32_t imm);
1833 void movptr(Address dst, intptr_t src, Register rscratch);
1834 void movptr(ArrayAddress dst, Register src, Register rscratch);
1835
1836 void movptr(Register dst, RegisterOrConstant src) {
1837 if (src.is_constant()) movptr(dst, src.as_constant());
1838 else movptr(dst, src.as_register());
1839 }
1840
1841
1842 // to avoid hiding movl
1843 void mov32(Register dst, AddressLiteral src);
1844 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1845
1846 // Import other mov() methods from the parent class or else
1847 // they will be hidden by the following overriding declaration.
1848 using Assembler::movdl;
1849 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1850
1851 using Assembler::movq;
1852 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1853
1854 // Can push value or effective address
1855 void pushptr(AddressLiteral src, Register rscratch);
1856
1857 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1858 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1859
1860 void pushoop(jobject obj, Register rscratch);
1861 void pushklass(Metadata* obj, Register rscratch);
1862
  // sign extend an int ('l') value to a ptr-sized element as needed
1864 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1865 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1866
1867
1868 public:
1869 // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
1871 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1872
  // clear memory of constant size 'cnt' qwords, starting at 'base'
1874 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1875
1876 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1877 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
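
  // Illustrative use of the clearing helpers (the register choices are
  // hypothetical): zero 'cnt' qwords starting at the address in rdi:
  //
  //   __ clear_mem(rdi, rcx, rscratch1, xmm0, /*is_large*/ false);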
1878
1879 // Fill primitive arrays
1880 void generate_fill(BasicType t, bool aligned,
1881 Register to, Register value, Register count,
1882 Register rtmp, XMMRegister xtmp);
1883
1884 void encode_iso_array(Register src, Register dst, Register len,
1885 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1886 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1887
1888 #ifdef _LP64
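  // Helpers backing the java.math.BigInteger intrinsics (multiplyToLen,
  // squareToLen, mulAdd) and the vectorizedMismatch intrinsic (64-bit only).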
1889 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1890 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1891 Register y, Register y_idx, Register z,
1892 Register carry, Register product,
1893 Register idx, Register kdx);
1894 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1895 Register yz_idx, Register idx,
1896 Register carry, Register product, int offset);
1897 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1898 Register carry, Register carry2,
1899 Register idx, Register jdx,
1900 Register yz_idx1, Register yz_idx2,
1901 Register tmp, Register tmp3, Register tmp4);
1902 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1903 Register yz_idx, Register idx, Register jdx,
1904 Register carry, Register product,
1905 Register carry2);
1906 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1907 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1908 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1909 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1910 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1911 Register tmp2);
1912 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1913 Register rdxReg, Register raxReg);
1914 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1915 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1916 Register tmp3, Register tmp4);
1917 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1918 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1919
1920 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1921 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1922 Register raxReg);
1923 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1924 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1925 Register raxReg);
1926 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1927 Register result, Register tmp1, Register tmp2,
1928 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1929 #endif
1930
1931 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1932 void update_byte_crc32(Register crc, Register val, Register table);
1933 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1934
1935
1936 #ifdef _LP64
1937 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1938 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1939 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1940 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1941 #endif // _LP64
1942
1943 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1944 // Note on a naming convention:
1945 // Prefix w = register only used on a Westmere+ architecture
1946 // Prefix n = register only used on a Nehalem architecture
1947 #ifdef _LP64
1948 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1949 Register tmp1, Register tmp2, Register tmp3);
1950 #else
1951 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1952 Register tmp1, Register tmp2, Register tmp3,
1953 XMMRegister xtmp1, XMMRegister xtmp2);
1954 #endif
1955 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1956 Register in_out,
1957 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1958 XMMRegister w_xtmp2,
1959 Register tmp1,
1960 Register n_tmp2, Register n_tmp3);
1961 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1962 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1963 Register tmp1, Register tmp2,
1964 Register n_tmp3);
1965 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1966 Register in_out1, Register in_out2, Register in_out3,
1967 Register tmp1, Register tmp2, Register tmp3,
1968 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1969 Register tmp4, Register tmp5,
1970 Register n_tmp6);
1971 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1972 Register tmp1, Register tmp2, Register tmp3,
1973 Register tmp4, Register tmp5, Register tmp6,
1974 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1975 bool is_pclmulqdq_supported);
1976 // Fold 128-bit data chunk
1977 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1978 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1979 #ifdef _LP64
1980 // Fold 512-bit data chunk
1981 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1982 #endif // _LP64
1983 // Fold 8-bit data
1984 void fold_8bit_crc32(Register crc, Register table, Register tmp);
1985 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1986
1987 // Compress char[] array to byte[].
1988 void char_array_compress(Register src, Register dst, Register len,
1989 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1990 XMMRegister tmp4, Register tmp5, Register result,
1991 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1992
1993 // Inflate byte[] array to char[].
1994 void byte_array_inflate(Register src, Register dst, Register len,
1995 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
1996
1997 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
1998 Register length, Register temp, int vec_enc);
1999
2000 void fill64_masked(uint shift, Register dst, int disp,
2001 XMMRegister xmm, KRegister mask, Register length,
2002 Register temp, bool use64byteVector = false);
2003
2004 void fill32_masked(uint shift, Register dst, int disp,
2005 XMMRegister xmm, KRegister mask, Register length,
2006 Register temp);
2007
2008 void fill32(Address dst, XMMRegister xmm);
2009
2010 void fill32(Register dst, int disp, XMMRegister xmm);
2011
2012 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2013
  void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2015
2016 #ifdef _LP64
2017 void convert_f2i(Register dst, XMMRegister src);
2018 void convert_d2i(Register dst, XMMRegister src);
2019 void convert_f2l(Register dst, XMMRegister src);
2020 void convert_d2l(Register dst, XMMRegister src);
2021 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2022 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2023
2024 void cache_wb(Address line);
2025 void cache_wbsync(bool is_pre);
2026
2027 #ifdef COMPILER2_OR_JVMCI
2028 void generate_fill_avx3(BasicType type, Register to, Register value,
2029 Register count, Register rtmp, XMMRegister xtmp);
2030 #endif // COMPILER2_OR_JVMCI
2031 #endif // _LP64
2032
2033 void vallones(XMMRegister dst, int vector_len);
2034
2035 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2036
2037 void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
2038 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
2039 };
2040
/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits assembly code that jumps around any code
 * emitted between the creation of the instance and its automatic destruction
 * at the end of the enclosing scope, depending on the run-time value of the
 * flag passed to the constructor.
 */
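//
// A typical use is to guard optional instrumentation behind a flag whose value
// is only known at run time. Illustrative sketch (the flag name and scratch
// register are hypothetical):
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false, rscratch1);
//     // ... code emitted here is jumped over when the flag is false ...
//   }  // the skip label is bound when 'skip' is destroyed at the end of the scope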
2049 class SkipIfEqual {
2050 private:
2051 MacroAssembler* _masm;
2052 Label _label;
2053
2054 public:
2055 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
2056 ~SkipIfEqual();
2057 };
2058
2059 #endif // CPU_X86_MACROASSEMBLER_X86_HPP