1 /*
2 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/rtmLocking.hpp"
34 #include "runtime/vm_version.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
56
57 protected:
58 // This is the base routine called by the different versions of call_VM. The interpreter
59 // may customize this version by overriding it for its purposes (e.g., to save/restore
60 // additional registers when doing a VM call).
61 //
62   // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
63   // returns the register which contains the thread upon return. If a thread register has been
64   // specified, the return value will correspond to that register. If no last_java_sp is specified
65   // (noreg) then rsp will be used instead.
66 virtual void call_VM_base( // returns the register containing the thread upon return
67 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
68 Register java_thread, // the thread if computed before ; use noreg otherwise
69 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
70 address entry_point, // the entry point
71 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
72 bool check_exceptions // whether to check for pending exceptions after return
73 );
74
75 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
76
77 // helpers for FPU flag access
78 // tmp is a temporary register, if none is available use noreg
79 void save_rax (Register tmp);
80 void restore_rax(Register tmp);
81
82 public:
83 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
84
85 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
86 // The implementation is only non-empty for the InterpreterMacroAssembler,
87 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
88 virtual void check_and_handle_popframe(Register java_thread);
89 virtual void check_and_handle_earlyret(Register java_thread);
90
91 Address as_Address(AddressLiteral adr);
92 Address as_Address(ArrayAddress adr, Register rscratch);
93
94 // Support for null-checks
95 //
96 // Generates code that causes a null OS exception if the content of reg is null.
97 // If the accessed location is M[reg + offset] and the offset is known, provide the
98 // offset. No explicit code generation is needed if the offset is within a certain
99 // range (0 <= offset <= page_size).
100
101 void null_check(Register reg, int offset = -1);
102 static bool needs_explicit_null_check(intptr_t offset);
103 static bool uses_implicit_null_check(void* address);
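
  // A minimal usage sketch (illustrative, not code from this file): the register and the
  // field offset below are hypothetical. For small offsets no explicit test is emitted;
  // the subsequent memory access itself serves as the implicit null check.
  //
  //   __ null_check(rax, 8);            // 8 = hypothetical field offset within page_size
  //   __ movl(rbx, Address(rax, 8));    // this access faults if rax is null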
104
105 // Required platform-specific helpers for Label::patch_instructions.
106 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
107 void pd_patch_instruction(address branch, address target, const char* file, int line) {
108 unsigned char op = branch[0];
109 assert(op == 0xE8 /* call */ ||
110 op == 0xE9 /* jmp */ ||
111 op == 0xEB /* short jmp */ ||
112 (op & 0xF0) == 0x70 /* short jcc */ ||
113 op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
114 op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
115 "Invalid opcode at patch point");
116
117 if (op == 0xEB || (op & 0xF0) == 0x70) {
118 // short offset operators (jmp and jcc)
119 char* disp = (char*) &branch[1];
120 int imm8 = checked_cast<int>(target - (address) &disp[1]);
121 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
122 file == nullptr ? "<null>" : file, line);
123 *disp = (char)imm8;
124 } else {
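      // 32-bit displacement forms: call (0xE8), jmp (0xE9), long jcc (0x0F 8x) and
      // xbegin (0xC7 F8); the displacement follows one or two opcode bytes.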
125 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
126 int imm32 = checked_cast<int>(target - (address) &disp[1]);
127 *disp = imm32;
128 }
129 }
130
131 // The following 4 methods return the offset of the appropriate move instruction
132
133 // Support for fast byte/short loading with zero extension (depending on particular CPU)
134 int load_unsigned_byte(Register dst, Address src);
135 int load_unsigned_short(Register dst, Address src);
136
137 // Support for fast byte/short loading with sign extension (depending on particular CPU)
138 int load_signed_byte(Register dst, Address src);
139 int load_signed_short(Register dst, Address src);
140
141 // Support for sign-extension (hi:lo = extend_sign(lo))
142 void extend_sign(Register hi, Register lo);
143
144 // Load and store values by size and signed-ness
145 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
146 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
147
148 // Support for inc/dec with optimal instruction selection depending on value
149
150 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
151 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
152 void increment(Address dst, int value = 1) { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
153 void decrement(Address dst, int value = 1) { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }
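
  // Illustrative usage (not from this file); wordSize is the usual HotSpot constant:
  //
  //   __ increment(rbx);                        // register form, value defaults to 1
  //   __ decrement(Address(rsp, 0), wordSize);  // memory form with an explicit value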
154
155 void decrementl(Address dst, int value = 1);
156 void decrementl(Register reg, int value = 1);
157
158 void decrementq(Register reg, int value = 1);
159 void decrementq(Address dst, int value = 1);
160
161 void incrementl(Address dst, int value = 1);
162 void incrementl(Register reg, int value = 1);
163
164 void incrementq(Register reg, int value = 1);
165 void incrementq(Address dst, int value = 1);
166
167 void incrementl(AddressLiteral dst, Register rscratch = noreg);
168 void incrementl(ArrayAddress dst, Register rscratch);
169
170 void incrementq(AddressLiteral dst, Register rscratch = noreg);
171
172 // Support optimal SSE move instructions.
173 void movflt(XMMRegister dst, XMMRegister src) {
174 if (dst-> encoding() == src->encoding()) return;
175 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
176 else { movss (dst, src); return; }
177 }
178 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
179 void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
180 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
181
182 // Move with zero extension
183 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
184
185 void movdbl(XMMRegister dst, XMMRegister src) {
186 if (dst-> encoding() == src->encoding()) return;
187 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
188 else { movsd (dst, src); return; }
189 }
190
191 void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
192
193 void movdbl(XMMRegister dst, Address src) {
194 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
195 else { movlpd(dst, src); return; }
196 }
197 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
198
199 void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
200     // Use a separate tmp XMM register because the caller may
201     // require the src XMM register to be unchanged (as in x86.ad).
202 vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
203 movdl(dst, tmp);
204 movswl(dst, dst);
205 }
206
207 void flt16_to_flt(XMMRegister dst, Register src) {
208 movdl(dst, src);
209 vcvtph2ps(dst, dst, Assembler::AVX_128bit);
210 }
211
212 // Alignment
213 void align32();
214 void align64();
215 void align(int modulus);
216 void align(int modulus, int target);
217
218 void post_call_nop();
219 // A 5 byte nop that is safe for patching (see patch_verified_entry)
220 void fat_nop();
221
222 // Stack frame creation/removal
223 void enter();
224 void leave();
225
226   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
227 // The pointer will be loaded into the thread register.
228 void get_thread(Register thread);
229
230 #ifdef _LP64
231 // Support for argument shuffling
232
233 // bias in bytes
234 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
235 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
236 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
237 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
238 void move_ptr(VMRegPair src, VMRegPair dst);
239 void object_move(OopMap* map,
240 int oop_handle_offset,
241 int framesize_in_slots,
242 VMRegPair src,
243 VMRegPair dst,
244 bool is_receiver,
245 int* receiver_offset);
246 #endif // _LP64
247
248 // Support for VM calls
249 //
250 // It is imperative that all calls into the VM are handled via the call_VM macros.
251   // They make sure that the stack linkage is set up correctly. call_VM's correspond
252 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
253
254
255 void call_VM(Register oop_result,
256 address entry_point,
257 bool check_exceptions = true);
258 void call_VM(Register oop_result,
259 address entry_point,
260 Register arg_1,
261 bool check_exceptions = true);
262 void call_VM(Register oop_result,
263 address entry_point,
264 Register arg_1, Register arg_2,
265 bool check_exceptions = true);
266 void call_VM(Register oop_result,
267 address entry_point,
268 Register arg_1, Register arg_2, Register arg_3,
269 bool check_exceptions = true);
270
271 // Overloadings with last_Java_sp
272 void call_VM(Register oop_result,
273 Register last_java_sp,
274 address entry_point,
275 int number_of_arguments = 0,
276 bool check_exceptions = true);
277 void call_VM(Register oop_result,
278 Register last_java_sp,
279 address entry_point,
280 Register arg_1, bool
281 check_exceptions = true);
282 void call_VM(Register oop_result,
283 Register last_java_sp,
284 address entry_point,
285 Register arg_1, Register arg_2,
286 bool check_exceptions = true);
287 void call_VM(Register oop_result,
288 Register last_java_sp,
289 address entry_point,
290 Register arg_1, Register arg_2, Register arg_3,
291 bool check_exceptions = true);
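
  // A hedged sketch of typical call_VM use (illustrative only; the runtime entry is a
  // placeholder and `__` is the usual masm-> shorthand):
  //
  //   __ call_VM(rax,                                            // oop result, noreg if none
  //              CAST_FROM_FN_PTR(address, SomeRuntime::entry),  // hypothetical entry point
  //              rbx,                                            // arg_1
  //              true);                                          // check for pending exceptions
  //
  // Under the covers this goes through call_VM_base, which sets up the last_Java_frame and
  // passes the current thread as the implicit first C argument.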
292
293 void get_vm_result (Register oop_result, Register thread);
294 void get_vm_result_2(Register metadata_result, Register thread);
295
296 // These always tightly bind to MacroAssembler::call_VM_base
297 // bypassing the virtual implementation
298 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
299 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
300 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
301 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
302 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
303
304 void call_VM_leaf0(address entry_point);
305 void call_VM_leaf(address entry_point,
306 int number_of_arguments = 0);
307 void call_VM_leaf(address entry_point,
308 Register arg_1);
309 void call_VM_leaf(address entry_point,
310 Register arg_1, Register arg_2);
311 void call_VM_leaf(address entry_point,
312 Register arg_1, Register arg_2, Register arg_3);
313
314 void call_VM_leaf(address entry_point,
315 Register arg_1, Register arg_2, Register arg_3, Register arg_4);
316
317 // These always tightly bind to MacroAssembler::call_VM_leaf_base
318 // bypassing the virtual implementation
319 void super_call_VM_leaf(address entry_point);
320 void super_call_VM_leaf(address entry_point, Register arg_1);
321 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
322 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
323 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
324
325 // last Java Frame (fills frame anchor)
326 void set_last_Java_frame(Register thread,
327 Register last_java_sp,
328 Register last_java_fp,
329 address last_java_pc,
330 Register rscratch);
331
332 // thread in the default location (r15_thread on 64bit)
333 void set_last_Java_frame(Register last_java_sp,
334 Register last_java_fp,
335 address last_java_pc,
336 Register rscratch);
337
338 void reset_last_Java_frame(Register thread, bool clear_fp);
339
340 // thread in the default location (r15_thread on 64bit)
341 void reset_last_Java_frame(bool clear_fp);
342
343 // jobjects
344 void clear_jobject_tag(Register possibly_non_local);
345 void resolve_jobject(Register value, Register thread, Register tmp);
346 void resolve_global_jobject(Register value, Register thread, Register tmp);
347
348 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
349 void c2bool(Register x);
350
351 // C++ bool manipulation
352
353 void movbool(Register dst, Address src);
354 void movbool(Address dst, bool boolconst);
355 void movbool(Address dst, Register src);
356 void testbool(Register dst);
357
358 void resolve_oop_handle(Register result, Register tmp);
359 void resolve_weak_handle(Register result, Register tmp);
360 void load_mirror(Register mirror, Register method, Register tmp);
361 void load_method_holder_cld(Register rresult, Register rmethod);
362
363 void load_method_holder(Register holder, Register method);
364
365 // oop manipulations
366 void load_klass(Register dst, Register src, Register tmp);
367 void store_klass(Register dst, Register src, Register tmp);
368
369 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
370 Register tmp1, Register thread_tmp);
371 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
372 Register tmp1, Register tmp2, Register tmp3);
373
374 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
375 Register thread_tmp = noreg, DecoratorSet decorators = 0);
376 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
377 Register thread_tmp = noreg, DecoratorSet decorators = 0);
378 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
379 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
380
381 // Used for storing null. All other oop constants should be
382 // stored using routines that take a jobject.
383 void store_heap_oop_null(Address dst);
384
385 #ifdef _LP64
386 void store_klass_gap(Register dst, Register src);
387
388 // This dummy is to prevent a call to store_heap_oop from
389 // converting a zero (like null) into a Register by giving
390 // the compiler two choices it can't resolve
391
392 void store_heap_oop(Address dst, void* dummy);
393
394 void encode_heap_oop(Register r);
395 void decode_heap_oop(Register r);
396 void encode_heap_oop_not_null(Register r);
397 void decode_heap_oop_not_null(Register r);
398 void encode_heap_oop_not_null(Register dst, Register src);
399 void decode_heap_oop_not_null(Register dst, Register src);
400
401 void set_narrow_oop(Register dst, jobject obj);
402 void set_narrow_oop(Address dst, jobject obj);
403 void cmp_narrow_oop(Register dst, jobject obj);
404 void cmp_narrow_oop(Address dst, jobject obj);
405
406 void encode_klass_not_null(Register r, Register tmp);
407 void decode_klass_not_null(Register r, Register tmp);
408 void encode_and_move_klass_not_null(Register dst, Register src);
409 void decode_and_move_klass_not_null(Register dst, Register src);
410 void set_narrow_klass(Register dst, Klass* k);
411 void set_narrow_klass(Address dst, Klass* k);
412 void cmp_narrow_klass(Register dst, Klass* k);
413 void cmp_narrow_klass(Address dst, Klass* k);
414
415   // If the heap base register is used, reinitialize it with the correct value
416 void reinit_heapbase();
417
418 DEBUG_ONLY(void verify_heapbase(const char* msg);)
419
420 #endif // _LP64
421
422 // Int division/remainder for Java
423 // (as idivl, but checks for special case as described in JVM spec.)
424 // returns idivl instruction offset for implicit exception handling
425 int corrected_idivl(Register reg);
426
427 // Long division/remainder for Java
428 // (as idivq, but checks for special case as described in JVM spec.)
429 // returns idivq instruction offset for implicit exception handling
430 int corrected_idivq(Register reg);
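
  // The special case referred to above is min_jint / -1 (min_jlong / -1 for the 64-bit
  // variant): a plain idiv would raise a divide error on the overflowing quotient, while
  // the JVM spec requires quotient == dividend and remainder == 0 for this input.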
431
432 void int3();
433
434 // Long operation macros for a 32bit cpu
435 // Long negation for Java
436 void lneg(Register hi, Register lo);
437
438 // Long multiplication for Java
439 // (destroys contents of eax, ebx, ecx and edx)
440 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
441
442 // Long shifts for Java
443 // (semantics as described in JVM spec.)
444 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
445 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
446
447 // Long compare for Java
448 // (semantics as described in JVM spec.)
449 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
450
451
452 // misc
453
454 // Sign extension
455 void sign_extend_short(Register reg);
456 void sign_extend_byte(Register reg);
457
458 // Division by power of 2, rounding towards 0
459 void division_with_shift(Register reg, int shift_value);
460
461 #ifndef _LP64
462 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
463 //
464 // CF (corresponds to C0) if x < y
465 // PF (corresponds to C2) if unordered
466 // ZF (corresponds to C3) if x = y
467 //
468 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
469 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
470 void fcmp(Register tmp);
471 // Variant of the above which allows y to be further down the stack
472 // and which only pops x and y if specified. If pop_right is
473 // specified then pop_left must also be specified.
474 void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
475
476 // Floating-point comparison for Java
477 // Compares the top-most stack entries on the FPU stack and stores the result in dst.
478 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
479 // (semantics as described in JVM spec.)
480 void fcmp2int(Register dst, bool unordered_is_less);
481 // Variant of the above which allows y to be further down the stack
482 // and which only pops x and y if specified. If pop_right is
483 // specified then pop_left must also be specified.
484 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
485
486 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
487 // tmp is a temporary register, if none is available use noreg
488 void fremr(Register tmp);
489
490 // only if +VerifyFPU
491 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
492 #endif // !LP64
493
494 // dst = c = a * b + c
495 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
496 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
497
498 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
499 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
500 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
501 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
502
503
504 // same as fcmp2int, but using SSE2
505 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
506 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
507
508 // branch to L if FPU flag C2 is set/not set
509 // tmp is a temporary register, if none is available use noreg
510 void jC2 (Register tmp, Label& L);
511 void jnC2(Register tmp, Label& L);
512
513 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
514 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
515 void load_float(Address src);
516
517 // Store float value to 'address'. If UseSSE >= 1, the value is stored
518 // from register xmm0. Otherwise, the value is stored from the FPU stack.
519 void store_float(Address dst);
520
521 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
522 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
523 void load_double(Address src);
524
525 // Store double value to 'address'. If UseSSE >= 2, the value is stored
526 // from register xmm0. Otherwise, the value is stored from the FPU stack.
527 void store_double(Address dst);
528
529 #ifndef _LP64
530 // Pop ST (ffree & fincstp combined)
531 void fpop();
532
533 void empty_FPU_stack();
534 #endif // !_LP64
535
536 void push_IU_state();
537 void pop_IU_state();
538
539 void push_FPU_state();
540 void pop_FPU_state();
541
542 void push_CPU_state();
543 void pop_CPU_state();
544
545 void push_cont_fastpath();
546 void pop_cont_fastpath();
547
548 void inc_held_monitor_count();
549 void dec_held_monitor_count();
550
551 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
552
553   // Round up to a multiple of modulus (which must be a power of two)
554 void round_to(Register reg, int modulus);
555
556 private:
557   // General-purpose and XMM registers potentially clobbered by native code; there
558   // is no need for FPU or AVX opmask related methods because C1/the interpreter
559   // - always save/restore the FPU state as a whole
560   // - do not care about the AVX-512 opmask registers
561 static RegSet call_clobbered_gp_registers();
562 static XMMRegSet call_clobbered_xmm_registers();
563
564 void push_set(XMMRegSet set, int offset);
565 void pop_set(XMMRegSet set, int offset);
566
567 public:
568 void push_set(RegSet set, int offset = -1);
569 void pop_set(RegSet set, int offset = -1);
570
571 // Push and pop everything that might be clobbered by a native
572 // runtime call.
573 // Only save the lower 64 bits of each vector register.
574 // Additional registers can be excluded in a passed RegSet.
575 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
576 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
577
578 void push_call_clobbered_registers(bool save_fpu = true) {
579 push_call_clobbered_registers_except(RegSet(), save_fpu);
580 }
581 void pop_call_clobbered_registers(bool restore_fpu = true) {
582 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
583 }
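
  // Illustrative use around a native runtime call (the leaf entry is a placeholder):
  //
  //   __ push_call_clobbered_registers();
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_helper));  // hypothetical helper
  //   __ pop_call_clobbered_registers();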
584
585 // allocation
586 void tlab_allocate(
587 Register thread, // Current thread
588 Register obj, // result: pointer to object after successful allocation
589 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
590 int con_size_in_bytes, // object size in bytes if known at compile time
591 Register t1, // temp register
592 Register t2, // temp register
593 Label& slow_case // continuation point if fast allocation fails
594 );
595 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
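
  // A minimal fast-path allocation sketch (registers, size and labels are illustrative;
  // the slow path would typically call into the runtime):
  //
  //   Label slow_case;
  //   __ tlab_allocate(r15_thread, rax, noreg, instance_size, rbx, rcx, slow_case);
  //   // ... initialize the header and fields of the new object in rax ...
  //   __ bind(slow_case);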
596
597 // interface method calling
598 void lookup_interface_method(Register recv_klass,
599 Register intf_klass,
600 RegisterOrConstant itable_index,
601 Register method_result,
602 Register scan_temp,
603 Label& no_such_interface,
604 bool return_method = true);
605
606 void lookup_interface_method_stub(Register recv_klass,
607 Register holder_klass,
608 Register resolved_klass,
609 Register method_result,
610 Register scan_temp,
611 Register temp_reg2,
612 Register receiver,
613 int itable_index,
614 Label& L_no_such_interface);
615
616 // virtual method calling
617 void lookup_virtual_method(Register recv_klass,
618 RegisterOrConstant vtable_index,
619 Register method_result);
620
621 // Test sub_klass against super_klass, with fast and slow paths.
622
623 // The fast path produces a tri-state answer: yes / no / maybe-slow.
624 // One of the three labels can be null, meaning take the fall-through.
625 // If super_check_offset is -1, the value is loaded up from super_klass.
626 // No registers are killed, except temp_reg.
627 void check_klass_subtype_fast_path(Register sub_klass,
628 Register super_klass,
629 Register temp_reg,
630 Label* L_success,
631 Label* L_failure,
632 Label* L_slow_path,
633 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
634
635 // The rest of the type check; must be wired to a corresponding fast path.
636 // It does not repeat the fast path logic, so don't use it standalone.
637 // The temp_reg and temp2_reg can be noreg, if no temps are available.
638 // Updates the sub's secondary super cache as necessary.
639 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
640 void check_klass_subtype_slow_path(Register sub_klass,
641 Register super_klass,
642 Register temp_reg,
643 Register temp2_reg,
644 Label* L_success,
645 Label* L_failure,
646 bool set_cond_codes = false);
647
648 // Simplified, combined version, good for typical uses.
649 // Falls through on failure.
650 void check_klass_subtype(Register sub_klass,
651 Register super_klass,
652 Register temp_reg,
653 Label& L_success);
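
  // Sketch of how the fast and slow paths are typically wired together (register choices
  // and labels are illustrative):
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure, nullptr);
  //   // fall-through means "maybe": finish the check on the slow path
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_success, nullptr);
  //   __ bind(L_failure);   // the slow path falls through here on failure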
654
655 void clinit_barrier(Register klass,
656 Register thread,
657 Label* L_fast_path = nullptr,
658 Label* L_slow_path = nullptr);
659
660 // method handles (JSR 292)
661 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
662
663 // Debugging
664
665 // only if +VerifyOops
666 void _verify_oop(Register reg, const char* s, const char* file, int line);
667 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
668
669 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
670 if (VerifyOops) {
671 _verify_oop(reg, s, file, line);
672 }
673 }
674 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
675 if (VerifyOops) {
676 _verify_oop_addr(reg, s, file, line);
677 }
678 }
679
680 // TODO: verify method and klass metadata (compare against vptr?)
681 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
682 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
683
684 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
685 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
686 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
687 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
688 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
689
690 // Verify or restore cpu control state after JNI call
691 void restore_cpu_control_state_after_jni(Register rscratch);
692
693 // prints msg, dumps registers and stops execution
694 void stop(const char* msg);
695
696 // prints msg and continues
697 void warn(const char* msg);
698
699 // dumps registers and other state
700 void print_state();
701
702 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
703 static void debug64(char* msg, int64_t pc, int64_t regs[]);
704 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
705 static void print_state64(int64_t pc, int64_t regs[]);
706
707 void os_breakpoint();
708
709 void untested() { stop("untested"); }
710
711 void unimplemented(const char* what = "");
712
713 void should_not_reach_here() { stop("should not reach here"); }
714
715 void print_CPU_state();
716
717 // Stack overflow checking
718 void bang_stack_with_offset(int offset) {
719     // stack grows down, the caller passes a positive offset which is negated below
720 assert(offset > 0, "must bang with negative offset");
721 movl(Address(rsp, (-offset)), rax);
722 }
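
  // Shadow-zone checks typically bang one page at a time, roughly like this (the page
  // count is illustrative):
  //
  //   for (int pages = 1; pages <= n_shadow_pages; pages++) {
  //     __ bang_stack_with_offset(pages * (int)os::vm_page_size());
  //   }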
723
724   // Writes to successive stack pages until the given size is reached, to check for
725   // stack overflow plus shadow pages. Also clobbers tmp.
726 void bang_stack_size(Register size, Register tmp);
727
728 // Check for reserved stack access in method being exited (for JIT)
729 void reserved_stack_check();
730
731 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
732
733 void verify_tlab();
734
735 static Condition negate_condition(Condition cond);
736
737   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
738   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
739   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
740   // here in MacroAssembler. The major exception to this rule is call.
741
742 // Arithmetics
743
744
745 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
746 void addptr(Address dst, Register src);
747
748 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
749 void addptr(Register dst, int32_t src);
750 void addptr(Register dst, Register src);
751 void addptr(Register dst, RegisterOrConstant src) {
752 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
753 else addptr(dst, src.as_register());
754 }
755
756 void andptr(Register dst, int32_t src);
757 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
758
759 #ifdef _LP64
760 using Assembler::andq;
761 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
762 #endif
763
764 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
765
766 // renamed to drag out the casting of address to int32_t/intptr_t
767 void cmp32(Register src1, int32_t imm);
768
769 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
770 // compare reg - mem, or reg - &mem
771 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
772
773 void cmp32(Register src1, Address src2);
774
775 #ifndef _LP64
776 void cmpklass(Address dst, Metadata* obj);
777 void cmpklass(Register dst, Metadata* obj);
778 void cmpoop(Address dst, jobject obj);
779 #endif // _LP64
780
781 void cmpoop(Register src1, Register src2);
782 void cmpoop(Register src1, Address src2);
783 void cmpoop(Register dst, jobject obj, Register rscratch);
784
785   // NOTE: src2 must be the lval. This is NOT a mem-mem compare
786 void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
787
788 void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
789
790 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
791 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
792 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
793
794 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
795 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
796
797   // cmp64 to avoid hiding cmpq
798 void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
799
800 void cmpxchgptr(Register reg, Address adr);
801
802 void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
803
804 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
805 void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
806
807
808 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
809
810 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
811
812 void shlptr(Register dst, int32_t shift);
813 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
814
815 void shrptr(Register dst, int32_t shift);
816 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
817
818 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
819 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
820
821 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
822
823 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
824 void subptr(Register dst, int32_t src);
825 // Force generation of a 4 byte immediate value even if it fits into 8bit
826 void subptr_imm32(Register dst, int32_t src);
827 void subptr(Register dst, Register src);
828 void subptr(Register dst, RegisterOrConstant src) {
829 if (src.is_constant()) subptr(dst, (int) src.as_constant());
830 else subptr(dst, src.as_register());
831 }
832
833 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
834 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
835
836 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
837 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
838
839 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
840
841
842
843 // Helper functions for statistics gathering.
844   // Conditionally (atomically, on MPs) increments the counter at the passed address, preserving condition codes.
845 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
846 // Unconditional atomic increment.
847 void atomic_incl(Address counter_addr);
848 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
849 #ifdef _LP64
850 void atomic_incq(Address counter_addr);
851 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
852 #endif
853 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
854 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
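
  // Illustrative counter bump (the counter symbol and scratch register are placeholders):
  //
  //   __ atomic_incl(ExternalAddress((address)&some_counter), rscratch1);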
855
856 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
857 void lea(Register dst, AddressLiteral adr);
858 void lea(Address dst, AddressLiteral adr, Register rscratch);
859
860 void leal32(Register dst, Address src) { leal(dst, src); }
861
862 // Import other testl() methods from the parent class or else
863 // they will be hidden by the following overriding declaration.
864 using Assembler::testl;
865 void testl(Address dst, int32_t imm32);
866 void testl(Register dst, int32_t imm32);
867 void testl(Register dst, AddressLiteral src); // requires reachable address
868 using Assembler::testq;
869 void testq(Address dst, int32_t imm32);
870 void testq(Register dst, int32_t imm32);
871
872 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
873 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
874 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
875 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
876
877 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
878 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
879 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
880 void testptr(Register src1, Register src2);
881
882 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
883 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
884
885 // Calls
886
887 void call(Label& L, relocInfo::relocType rtype);
888 void call(Register entry);
889 void call(Address addr) { Assembler::call(addr); }
890
891   // NOTE: this call transfers to the effective address of entry NOT
892   // the address contained by entry, because this is more natural
893   // for jumps/calls.
894 void call(AddressLiteral entry, Register rscratch = rax);
895
896 // Emit the CompiledIC call idiom
897 void ic_call(address entry, jint method_index = 0);
898
899 void emit_static_call_stub();
900
901 // Jumps
902
903   // NOTE: these jumps transfer to the effective address of dst NOT
904   // the address contained by dst, because this is more natural
905   // for jumps/calls.
906 void jump(AddressLiteral dst, Register rscratch = noreg);
907
908 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
909
910 // 32bit can do a case table jump in one instruction but we no longer allow the base
911 // to be installed in the Address class. This jump will transfer to the address
912 // contained in the location described by entry (not the address of entry)
913 void jump(ArrayAddress entry, Register rscratch);
914
915 // Floating
916
917 void push_f(XMMRegister r);
918 void pop_f(XMMRegister r);
919 void push_d(XMMRegister r);
920 void pop_d(XMMRegister r);
921
922 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
923 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
924 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
925
926 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
927 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
928 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
929
930 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
931 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
932 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
933
934 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
935 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
936 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
937
938 #ifndef _LP64
939 void fadd_s(Address src) { Assembler::fadd_s(src); }
940 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
941
942 void fldcw(Address src) { Assembler::fldcw(src); }
943 void fldcw(AddressLiteral src);
944
945 void fld_s(int index) { Assembler::fld_s(index); }
946 void fld_s(Address src) { Assembler::fld_s(src); }
947 void fld_s(AddressLiteral src);
948
949 void fld_d(Address src) { Assembler::fld_d(src); }
950 void fld_d(AddressLiteral src);
951
952 void fld_x(Address src) { Assembler::fld_x(src); }
953 void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }
954
955 void fmul_s(Address src) { Assembler::fmul_s(src); }
956 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
957 #endif // !_LP64
958
959 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
960 void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
961
962 #ifdef _LP64
963 private:
964 void sha256_AVX2_one_round_compute(
965 Register reg_old_h,
966 Register reg_a,
967 Register reg_b,
968 Register reg_c,
969 Register reg_d,
970 Register reg_e,
971 Register reg_f,
972 Register reg_g,
973 Register reg_h,
974 int iter);
975 void sha256_AVX2_four_rounds_compute_first(int start);
976 void sha256_AVX2_four_rounds_compute_last(int start);
977 void sha256_AVX2_one_round_and_sched(
978 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
979 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
980 XMMRegister xmm_2, /* ymm6 */
981 XMMRegister xmm_3, /* ymm7 */
982 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
983 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
984 Register reg_c, /* edi */
985 Register reg_d, /* esi */
986 Register reg_e, /* r8d */
987 Register reg_f, /* r9d */
988 Register reg_g, /* r10d */
989 Register reg_h, /* r11d */
990 int iter);
991
992 void addm(int disp, Register r1, Register r2);
993
994 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
995 Register e, Register f, Register g, Register h, int iteration);
996
997 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
998 Register a, Register b, Register c, Register d, Register e, Register f,
999 Register g, Register h, int iteration);
1000
1001 void addmq(int disp, Register r1, Register r2);
1002 public:
1003 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1004 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1005 Register buf, Register state, Register ofs, Register limit, Register rsp,
1006 bool multi_block, XMMRegister shuf_mask);
1007 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1008 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1009 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1010 XMMRegister shuf_mask);
1011 #endif // _LP64
1012
1013 void fast_md5(Register buf, Address state, Address ofs, Address limit,
1014 bool multi_block);
1015
1016 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1017 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1018 Register buf, Register state, Register ofs, Register limit, Register rsp,
1019 bool multi_block);
1020
1021 #ifdef _LP64
1022 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1023 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1024 Register buf, Register state, Register ofs, Register limit, Register rsp,
1025 bool multi_block, XMMRegister shuf_mask);
1026 #else
1027 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1028 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1029 Register buf, Register state, Register ofs, Register limit, Register rsp,
1030 bool multi_block);
1031 #endif
1032
1033 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1034 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1035 Register rax, Register rcx, Register rdx, Register tmp);
1036
1037 #ifndef _LP64
1038 private:
1039 // Initialized in macroAssembler_x86_constants.cpp
1040 static address ONES;
1041 static address L_2IL0FLOATPACKET_0;
1042 static address PI4_INV;
1043 static address PI4X3;
1044 static address PI4X4;
1045
1046 public:
1047 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1048 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1049 Register rax, Register rcx, Register rdx, Register tmp1);
1050
1051 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1052 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1053 Register rax, Register rcx, Register rdx, Register tmp);
1054
1055 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1056 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1057 Register rdx, Register tmp);
1058
1059 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1060 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1061 Register rax, Register rbx, Register rdx);
1062
1063 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1064 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1065 Register rax, Register rcx, Register rdx, Register tmp);
1066
1067 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1068 Register edx, Register ebx, Register esi, Register edi,
1069 Register ebp, Register esp);
1070
1071 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1072 Register esi, Register edi, Register ebp, Register esp);
1073
1074 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1075 Register edx, Register ebx, Register esi, Register edi,
1076 Register ebp, Register esp);
1077
1078 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1079 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1080 Register rax, Register rcx, Register rdx, Register tmp);
1081 #endif // !_LP64
1082
1083 private:
1084
1085   // these are private because users should be using movflt/movdbl
1086
1087 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1088 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1089 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1090 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1091
1092 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1093 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1094
1095 public:
1096
1097 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1098 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1099 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1100
1101 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1102 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1103 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1104
1105 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1106 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1107 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1108
1109 using Assembler::vbroadcastsd;
1110 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1111
1112 using Assembler::vbroadcastss;
1113 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1114
1115 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1116 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1117 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1118
1119 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1120 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1121 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1122
1123 // Move Unaligned Double Quadword
1124 void movdqu(Address dst, XMMRegister src);
1125 void movdqu(XMMRegister dst, XMMRegister src);
1126 void movdqu(XMMRegister dst, Address src);
1127 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1128
1129 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1130 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1131 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1132 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1133 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1134 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1135
1136 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1137 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1138 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1139 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1140 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1141 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1142
1143   // Safe move operation: lowers to 16-bit moves for targets supporting the
1144   // AVX512F feature and to 64-bit moves for targets supporting the AVX512BW feature.
1145 void kmov(Address dst, KRegister src);
1146 void kmov(KRegister dst, Address src);
1147 void kmov(KRegister dst, KRegister src);
1148 void kmov(Register dst, KRegister src);
1149 void kmov(KRegister dst, Register src);
1150
1151 using Assembler::movddup;
1152 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1153
1154 using Assembler::vmovddup;
1155 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1156
1157 // AVX Unaligned forms
1158 void vmovdqu(Address dst, XMMRegister src);
1159 void vmovdqu(XMMRegister dst, Address src);
1160 void vmovdqu(XMMRegister dst, XMMRegister src);
1161 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1162 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1163
1164 // AVX512 Unaligned
1165 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1166 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1167
1168 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1169 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1170
1171 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
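    // Elide the move when it would be an unmasked self-copy (a no-op); the other
    // evmovdqu* register-to-register forms below apply the same guard.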
1172 if (dst->encoding() != src->encoding() || mask != k0) {
1173 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1174 }
1175 }
1176 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1177 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1178 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1179
1180 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1181 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1182
1183 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1184 if (dst->encoding() != src->encoding() || mask != k0) {
1185 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1186 }
1187 }
1188 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1189 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1190 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1191
1192 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1193 if (dst->encoding() != src->encoding()) {
1194 Assembler::evmovdqul(dst, src, vector_len);
1195 }
1196 }
1197 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1198 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1199
1200 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1201 if (dst->encoding() != src->encoding() || mask != k0) {
1202 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1203 }
1204 }
1205 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1206 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1207 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1208
1209 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1210 if (dst->encoding() != src->encoding()) {
1211 Assembler::evmovdquq(dst, src, vector_len);
1212 }
1213 }
1214 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1215 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1216 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1217
1218 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1219 if (dst->encoding() != src->encoding() || mask != k0) {
1220 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1221 }
1222 }
1223 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1224 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1225 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1226
1227 // Move Aligned Double Quadword
1228 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1229 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1230 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1231
1232 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1233 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1234 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1235 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1236
1237 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1238 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1239 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1240
1241 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1242 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1243 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1244
1245 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1246 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1247 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1248
1249 // Carry-Less Multiplication Quadword
1250 void pclmulldq(XMMRegister dst, XMMRegister src) {
1251 // 0x00 - multiply lower 64 bits [0:63]
1252 Assembler::pclmulqdq(dst, src, 0x00);
1253 }
1254 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1255 // 0x11 - multiply upper 64 bits [64:127]
1256 Assembler::pclmulqdq(dst, src, 0x11);
1257 }
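  // Illustrative usage sketch (an assumption for clarity, not taken from this file;
  // 'masm' stands for any MacroAssembler*):
  //   masm->pclmulldq(xmm0, xmm1);  // xmm0 = clmul(xmm0[63:0],   xmm1[63:0])
  //   masm->pclmulhdq(xmm0, xmm1);  // xmm0 = clmul(xmm0[127:64], xmm1[127:64])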
1258
1259 void pcmpeqb(XMMRegister dst, XMMRegister src);
1260 void pcmpeqw(XMMRegister dst, XMMRegister src);
1261
1262 void pcmpestri(XMMRegister dst, Address src, int imm8);
1263 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1264
1265 void pmovzxbw(XMMRegister dst, XMMRegister src);
1266 void pmovzxbw(XMMRegister dst, Address src);
1267
1268 void pmovmskb(Register dst, XMMRegister src);
1269
1270 void ptest(XMMRegister dst, XMMRegister src);
1271
1272 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1273 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1274 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1275
1276 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1277 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1278 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1279
1280 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1281 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1282 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1283
1284 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1285 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1286 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1287
1288 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1289 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1290 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1291
1292 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1293 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1294 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1295
1296 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1297 void xorpd(XMMRegister dst, XMMRegister src);
1298 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1299 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1300
1301 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1302 void xorps(XMMRegister dst, XMMRegister src);
1303 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1304 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1305
1306 // Shuffle Bytes
1307 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1308 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1309 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1310  // AVX 3-operand instructions
1311
1312 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1313 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1314 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1315
1316 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1317 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1318 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1319
1320 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1321 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1322
1323 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1324 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1325 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1326
1327 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1328 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1329
1330 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1331 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1332 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1333
1334 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1335 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1336 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1337
1338 using Assembler::vpbroadcastd;
1339 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1340
1341 using Assembler::vpbroadcastq;
1342 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1343
1344 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1345
1346 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1347 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1348
1349 // Vector compares
1350 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1351 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1352 }
1353 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1354
1355 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1356 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1357 }
1358 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1359
1360 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1361 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1362 }
1363 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1364
1365 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1366 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1367 }
1368 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1369
1370 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1371
1372 // Emit comparison instruction for the specified comparison predicate.
1373 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1374 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1375
1376 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1377 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1378
1379 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1380
1381 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1382 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1383
1384 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1385 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1386 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1387
1388 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1389 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1390
1391 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1392 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1393
1394 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1395 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1396
1397 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1398 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1399
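  // The shift wrappers below dispatch on 'is_varshift': when it is false the plain
  // EVEX shift is emitted (a single count applied to every element), when it is true
  // the variable-shift form is emitted (a per-element count taken from 'src').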
1400 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1401 if (!is_varshift) {
1402 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1403 } else {
1404 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1405 }
1406 }
1407 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1408 if (!is_varshift) {
1409 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1410 } else {
1411 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1412 }
1413 }
1414 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1415 if (!is_varshift) {
1416 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1417 } else {
1418 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1419 }
1420 }
1421 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1422 if (!is_varshift) {
1423 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1424 } else {
1425 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1426 }
1427 }
1428 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1429 if (!is_varshift) {
1430 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1431 } else {
1432 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1433 }
1434 }
1435 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1436 if (!is_varshift) {
1437 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1438 } else {
1439 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1440 }
1441 }
1442 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1443 if (!is_varshift) {
1444 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1445 } else {
1446 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1447 }
1448 }
1449 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1450 if (!is_varshift) {
1451 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1452 } else {
1453 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1454 }
1455 }
1456 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1457 if (!is_varshift) {
1458 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1459 } else {
1460 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1461 }
1462 }
1463
1464 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1465 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1466 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1467 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1468
1469 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1470 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1471
1472 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1473 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1474
1475 void vptest(XMMRegister dst, XMMRegister src);
1476 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1477
1478 void punpcklbw(XMMRegister dst, XMMRegister src);
1479 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1480
1481 void pshufd(XMMRegister dst, Address src, int mode);
1482 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1483
1484 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1485 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1486
1487 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1488 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1489 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1490
1491 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1492 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1493 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1494
1495 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1496
1497 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1498 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1499 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1500
1501 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1502 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1503 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1504
1505 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1506 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1507 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1508
1509 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1510 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1511 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1512
1513 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1514 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1515 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1516
1517 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1518 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1519 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1520
1521 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1522 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1523
1524 // AVX Vector instructions
1525
1526 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1527 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1528 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1529
1530 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1531 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1532 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1533
1534 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1535 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1536 Assembler::vpxor(dst, nds, src, vector_len);
1537 else
1538 Assembler::vxorpd(dst, nds, src, vector_len);
1539 }
1540 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1541 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1542 Assembler::vpxor(dst, nds, src, vector_len);
1543 else
1544 Assembler::vxorpd(dst, nds, src, vector_len);
1545 }
1546 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1547
1548 // Simple version for AVX2 256bit vectors
1549 void vpxor(XMMRegister dst, XMMRegister src) {
1550 assert(UseAVX >= 2, "Should be at least AVX2");
1551 Assembler::vpxor(dst, dst, src, AVX_256bit);
1552 }
1553 void vpxor(XMMRegister dst, Address src) {
1554 assert(UseAVX >= 2, "Should be at least AVX2");
1555 Assembler::vpxor(dst, dst, src, AVX_256bit);
1556 }
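  // Illustrative usage sketch (an assumption for clarity, not taken from this file;
  // 'masm' stands for any MacroAssembler*): the usual zeroing idiom is
  //   masm->vpxor(xmm0, xmm0);   // xmm0 ^= xmm0, clearing the full 256-bit register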
1557
1558 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1559 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1560
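  // The 128-bit insert/extract helpers below pick an encoding by CPU capability:
  // AVX-512 without VL uses the 32x4 EVEX forms, AVX2 uses the native *i128 forms,
  // and plain AVX falls back to the floating-point *f128 forms.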
1561 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1562 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1563 Assembler::vinserti32x4(dst, nds, src, imm8);
1564 } else if (UseAVX > 1) {
1565 // vinserti128 is available only in AVX2
1566 Assembler::vinserti128(dst, nds, src, imm8);
1567 } else {
1568 Assembler::vinsertf128(dst, nds, src, imm8);
1569 }
1570 }
1571
1572 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1573 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1574 Assembler::vinserti32x4(dst, nds, src, imm8);
1575 } else if (UseAVX > 1) {
1576 // vinserti128 is available only in AVX2
1577 Assembler::vinserti128(dst, nds, src, imm8);
1578 } else {
1579 Assembler::vinsertf128(dst, nds, src, imm8);
1580 }
1581 }
1582
1583 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1584 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1585 Assembler::vextracti32x4(dst, src, imm8);
1586 } else if (UseAVX > 1) {
1587 // vextracti128 is available only in AVX2
1588 Assembler::vextracti128(dst, src, imm8);
1589 } else {
1590 Assembler::vextractf128(dst, src, imm8);
1591 }
1592 }
1593
1594 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1595 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1596 Assembler::vextracti32x4(dst, src, imm8);
1597 } else if (UseAVX > 1) {
1598 // vextracti128 is available only in AVX2
1599 Assembler::vextracti128(dst, src, imm8);
1600 } else {
1601 Assembler::vextractf128(dst, src, imm8);
1602 }
1603 }
1604
1605 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1606 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1607 vinserti128(dst, dst, src, 1);
1608 }
1609 void vinserti128_high(XMMRegister dst, Address src) {
1610 vinserti128(dst, dst, src, 1);
1611 }
1612 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1613 vextracti128(dst, src, 1);
1614 }
1615 void vextracti128_high(Address dst, XMMRegister src) {
1616 vextracti128(dst, src, 1);
1617 }
1618
1619 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1620 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1621 Assembler::vinsertf32x4(dst, dst, src, 1);
1622 } else {
1623 Assembler::vinsertf128(dst, dst, src, 1);
1624 }
1625 }
1626
1627 void vinsertf128_high(XMMRegister dst, Address src) {
1628 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1629 Assembler::vinsertf32x4(dst, dst, src, 1);
1630 } else {
1631 Assembler::vinsertf128(dst, dst, src, 1);
1632 }
1633 }
1634
1635 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1636 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1637 Assembler::vextractf32x4(dst, src, 1);
1638 } else {
1639 Assembler::vextractf128(dst, src, 1);
1640 }
1641 }
1642
1643 void vextractf128_high(Address dst, XMMRegister src) {
1644 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1645 Assembler::vextractf32x4(dst, src, 1);
1646 } else {
1647 Assembler::vextractf128(dst, src, 1);
1648 }
1649 }
1650
1651 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1652 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1653 Assembler::vinserti64x4(dst, dst, src, 1);
1654 }
1655 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1656 Assembler::vinsertf64x4(dst, dst, src, 1);
1657 }
1658 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1659 Assembler::vextracti64x4(dst, src, 1);
1660 }
1661 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1662 Assembler::vextractf64x4(dst, src, 1);
1663 }
1664 void vextractf64x4_high(Address dst, XMMRegister src) {
1665 Assembler::vextractf64x4(dst, src, 1);
1666 }
1667 void vinsertf64x4_high(XMMRegister dst, Address src) {
1668 Assembler::vinsertf64x4(dst, dst, src, 1);
1669 }
1670
1671 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1672 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1673 vinserti128(dst, dst, src, 0);
1674 }
1675 void vinserti128_low(XMMRegister dst, Address src) {
1676 vinserti128(dst, dst, src, 0);
1677 }
1678 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1679 vextracti128(dst, src, 0);
1680 }
1681 void vextracti128_low(Address dst, XMMRegister src) {
1682 vextracti128(dst, src, 0);
1683 }
1684
1685 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1686 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1687 Assembler::vinsertf32x4(dst, dst, src, 0);
1688 } else {
1689 Assembler::vinsertf128(dst, dst, src, 0);
1690 }
1691 }
1692
1693 void vinsertf128_low(XMMRegister dst, Address src) {
1694 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1695 Assembler::vinsertf32x4(dst, dst, src, 0);
1696 } else {
1697 Assembler::vinsertf128(dst, dst, src, 0);
1698 }
1699 }
1700
1701 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1702 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1703 Assembler::vextractf32x4(dst, src, 0);
1704 } else {
1705 Assembler::vextractf128(dst, src, 0);
1706 }
1707 }
1708
1709 void vextractf128_low(Address dst, XMMRegister src) {
1710 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1711 Assembler::vextractf32x4(dst, src, 0);
1712 } else {
1713 Assembler::vextractf128(dst, src, 0);
1714 }
1715 }
1716
1717 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1718 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1719 Assembler::vinserti64x4(dst, dst, src, 0);
1720 }
1721 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1722 Assembler::vinsertf64x4(dst, dst, src, 0);
1723 }
1724 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1725 Assembler::vextracti64x4(dst, src, 0);
1726 }
1727 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1728 Assembler::vextractf64x4(dst, src, 0);
1729 }
1730 void vextractf64x4_low(Address dst, XMMRegister src) {
1731 Assembler::vextractf64x4(dst, src, 0);
1732 }
1733 void vinsertf64x4_low(XMMRegister dst, Address src) {
1734 Assembler::vinsertf64x4(dst, dst, src, 0);
1735 }
1736
1737 // Carry-Less Multiplication Quadword
1738 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1739 // 0x00 - multiply lower 64 bits [0:63]
1740 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1741 }
1742 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1743 // 0x11 - multiply upper 64 bits [64:127]
1744 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1745 }
1746 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1747 // 0x10 - multiply nds[0:63] and src[64:127]
1748 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1749 }
1750 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1751    // 0x01 - multiply nds[64:127] and src[0:63]
1752 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1753 }
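  // In the imm8 selectors above, bit 0 picks the 'nds' quadword (0 = low, 1 = high)
  // and bit 4 picks the 'src' quadword, following the PCLMULQDQ immediate encoding.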
1754
1755 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1756 // 0x00 - multiply lower 64 bits [0:63]
1757 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1758 }
1759 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1760 // 0x11 - multiply upper 64 bits [64:127]
1761 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1762 }
1763
1764 // AVX-512 mask operations.
1765 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1766 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1767 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1768 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1769 void kortest(uint masklen, KRegister src1, KRegister src2);
1770 void ktest(uint masklen, KRegister src1, KRegister src2);
1771
1772 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1773 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1774
1775 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1776 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1777
1778 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1779 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1780
1781 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1782 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1783
1784 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1785 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1786 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1787 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1788
1789 using Assembler::evpandq;
1790 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1791
1792 using Assembler::evpaddq;
1793 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1794
1795 using Assembler::evporq;
1796 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1797
1798 using Assembler::vpternlogq;
1799 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1800
1801 void cmov32( Condition cc, Register dst, Address src);
1802 void cmov32( Condition cc, Register dst, Register src);
1803
1804 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1805
1806 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1807 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1808
1809 void movoop(Register dst, jobject obj);
1810 void movoop(Address dst, jobject obj, Register rscratch);
1811
1812 void mov_metadata(Register dst, Metadata* obj);
1813 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1814
1815 void movptr(Register dst, Register src);
1816 void movptr(Register dst, Address src);
1817 void movptr(Register dst, AddressLiteral src);
1818 void movptr(Register dst, ArrayAddress src);
1819 void movptr(Register dst, intptr_t src);
1820 void movptr(Address dst, Register src);
1821 void movptr(Address dst, int32_t imm);
1822 void movptr(Address dst, intptr_t src, Register rscratch);
1823 void movptr(ArrayAddress dst, Register src, Register rscratch);
1824
1825 void movptr(Register dst, RegisterOrConstant src) {
1826 if (src.is_constant()) movptr(dst, src.as_constant());
1827 else movptr(dst, src.as_register());
1828 }
1829
1830
1831 // to avoid hiding movl
1832 void mov32(Register dst, AddressLiteral src);
1833 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1834
1835 // Import other mov() methods from the parent class or else
1836 // they will be hidden by the following overriding declaration.
1837 using Assembler::movdl;
1838 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1839
1840 using Assembler::movq;
1841 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1842
1843 // Can push value or effective address
1844 void pushptr(AddressLiteral src, Register rscratch);
1845
1846 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1847 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1848
1849 void pushoop(jobject obj, Register rscratch);
1850 void pushklass(Metadata* obj, Register rscratch);
1851
1852  // sign-extend a 32-bit value ('l') to a pointer-sized element as needed
1853 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1854 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1855
1856
1857 public:
1858 // clear memory of size 'cnt' qwords, starting at 'base';
1859 // if 'is_large' is set, do not try to produce short loop
1860 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
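  // Illustrative usage sketch (register choices are an assumption for clarity, not
  // taken from this file; 'masm' stands for any MacroAssembler*):
  //   masm->clear_mem(rdi, rcx, rax, xmm0, /*is_large=*/false);  // zero rcx qwords at [rdi]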
1861
1862  // clear memory of a constant size of 'cnt' qwords, starting at 'base';
1863 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1864
1865 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1866 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1867
1868 // Fill primitive arrays
1869 void generate_fill(BasicType t, bool aligned,
1870 Register to, Register value, Register count,
1871 Register rtmp, XMMRegister xtmp);
1872
1873 void encode_iso_array(Register src, Register dst, Register len,
1874 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1875 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1876
1877 #ifdef _LP64
1878 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1879 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1880 Register y, Register y_idx, Register z,
1881 Register carry, Register product,
1882 Register idx, Register kdx);
1883 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1884 Register yz_idx, Register idx,
1885 Register carry, Register product, int offset);
1886 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1887 Register carry, Register carry2,
1888 Register idx, Register jdx,
1889 Register yz_idx1, Register yz_idx2,
1890 Register tmp, Register tmp3, Register tmp4);
1891 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1892 Register yz_idx, Register idx, Register jdx,
1893 Register carry, Register product,
1894 Register carry2);
1895 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1896 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1897 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1898 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1899 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1900 Register tmp2);
1901 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1902 Register rdxReg, Register raxReg);
1903 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1904 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1905 Register tmp3, Register tmp4);
1906 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1907 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1908
1909 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1910 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1911 Register raxReg);
1912 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1913 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1914 Register raxReg);
1915 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1916 Register result, Register tmp1, Register tmp2,
1917 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1918 #endif
1919
1920 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1921 void update_byte_crc32(Register crc, Register val, Register table);
1922 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1923
1924
1925 #ifdef _LP64
1926 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1927 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1928 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1929 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1930 #endif // _LP64
1931
1932 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1933 // Note on a naming convention:
1934 // Prefix w = register only used on a Westmere+ architecture
1935 // Prefix n = register only used on a Nehalem architecture
1936 #ifdef _LP64
1937 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1938 Register tmp1, Register tmp2, Register tmp3);
1939 #else
1940 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1941 Register tmp1, Register tmp2, Register tmp3,
1942 XMMRegister xtmp1, XMMRegister xtmp2);
1943 #endif
1944 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1945 Register in_out,
1946 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1947 XMMRegister w_xtmp2,
1948 Register tmp1,
1949 Register n_tmp2, Register n_tmp3);
1950 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1951 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1952 Register tmp1, Register tmp2,
1953 Register n_tmp3);
1954 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1955 Register in_out1, Register in_out2, Register in_out3,
1956 Register tmp1, Register tmp2, Register tmp3,
1957 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1958 Register tmp4, Register tmp5,
1959 Register n_tmp6);
1960 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1961 Register tmp1, Register tmp2, Register tmp3,
1962 Register tmp4, Register tmp5, Register tmp6,
1963 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1964 bool is_pclmulqdq_supported);
1965 // Fold 128-bit data chunk
1966 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1967 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1968 #ifdef _LP64
1969 // Fold 512-bit data chunk
1970 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1971 #endif // _LP64
1972 // Fold 8-bit data
1973 void fold_8bit_crc32(Register crc, Register table, Register tmp);
1974 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1975
1976 // Compress char[] array to byte[].
1977 void char_array_compress(Register src, Register dst, Register len,
1978 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1979 XMMRegister tmp4, Register tmp5, Register result,
1980 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1981
1982 // Inflate byte[] array to char[].
1983 void byte_array_inflate(Register src, Register dst, Register len,
1984 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
1985
1986 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
1987 Register length, Register temp, int vec_enc);
1988
1989 void fill64_masked(uint shift, Register dst, int disp,
1990 XMMRegister xmm, KRegister mask, Register length,
1991 Register temp, bool use64byteVector = false);
1992
1993 void fill32_masked(uint shift, Register dst, int disp,
1994 XMMRegister xmm, KRegister mask, Register length,
1995 Register temp);
1996
1997 void fill32(Address dst, XMMRegister xmm);
1998
1999 void fill32(Register dst, int disp, XMMRegister xmm);
2000
2001 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2002
2003  void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2004
2005 #ifdef _LP64
2006 void convert_f2i(Register dst, XMMRegister src);
2007 void convert_d2i(Register dst, XMMRegister src);
2008 void convert_f2l(Register dst, XMMRegister src);
2009 void convert_d2l(Register dst, XMMRegister src);
2010 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2011 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2012
2013 void cache_wb(Address line);
2014 void cache_wbsync(bool is_pre);
2015
2016 #ifdef COMPILER2_OR_JVMCI
2017 void generate_fill_avx3(BasicType type, Register to, Register value,
2018 Register count, Register rtmp, XMMRegister xtmp);
2019 #endif // COMPILER2_OR_JVMCI
2020 #endif // _LP64
2021
2022 void vallones(XMMRegister dst, int vector_len);
2023
2024 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2025
2026 void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
2027 void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
2028 };
2029
2030 /**
2031 * class SkipIfEqual:
2032 *
2033 * Instantiating this class emits assembly code that jumps around any code
2034 * generated between the creation of the instance and its automatic
2035 * destruction at the end of the enclosing scope, depending on the run-time
2036 * value of the flag passed to the constructor.
2037 */
2038 class SkipIfEqual {
2039 private:
2040 MacroAssembler* _masm;
2041 Label _label;
2042
2043 public:
2044 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
2045 ~SkipIfEqual();
2046 };
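// Illustrative usage sketch ('UseSomeFlag' and 'rtmp' are hypothetical names, not taken
// from this file):
//   {
//     SkipIfEqual skip(masm, &UseSomeFlag, false, rtmp);
//     // ... code emitted here is jumped over at run time when UseSomeFlag is false ...
//   }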
2047
2048 #endif // CPU_X86_MACROASSEMBLER_X86_HPP