1 /*
2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/vm_version.hpp"
34 #include "utilities/checkedCast.hpp"
35
36 // MacroAssembler extends Assembler by frequently used macros.
37 //
38 // Instructions for which a 'better' code sequence exists depending
39 // on arguments should also go in here.
40
41 class MacroAssembler: public Assembler {
42 friend class LIR_Assembler;
43 friend class Runtime1; // as_Address()
44
45 public:
46 // Support for VM calls
47 //
48 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
49 // may customize this version by overriding it for its purposes (e.g., to save/restore
50 // additional registers when doing a VM call).
51
52 virtual void call_VM_leaf_base(
53 address entry_point, // the entry point
54 int number_of_arguments // the number of arguments to pop after the call
55 );
56
57 protected:
58 // This is the base routine called by the different versions of call_VM. The interpreter
59 // may customize this version by overriding it for its purposes (e.g., to save/restore
60 // additional registers when doing a VM call).
61 //
62 // call_VM_base returns the register which contains the thread upon return.
63 // If no last_java_sp is specified (noreg), then rsp will be used instead.
64 virtual void call_VM_base( // returns the register containing the thread upon return
65 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
66 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
67 address entry_point, // the entry point
68 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
69 bool check_exceptions // whether to check for pending exceptions after return
70 );
71
72 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
73
74 public:
75 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
76
77 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
78 // The implementation is only non-empty for the InterpreterMacroAssembler,
79 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
80 virtual void check_and_handle_popframe();
81 virtual void check_and_handle_earlyret();
82
83 Address as_Address(AddressLiteral adr);
84 Address as_Address(ArrayAddress adr, Register rscratch);
85
86 // Support for null-checks
87 //
88 // Generates code that raises an OS null-pointer exception if the content of reg is null.
89 // If the accessed location is M[reg + offset] and the offset is known, provide the
90 // offset. No explicit code generation is needed if the offset is within a certain
91 // range (0 <= offset <= page_size).
92
93 void null_check(Register reg, int offset = -1);
94 static bool needs_explicit_null_check(intptr_t offset);
95 static bool uses_implicit_null_check(void* address);
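// A minimal usage sketch (illustrative only; the register names and the use of the
// klass offset are assumptions, not a prescribed pattern). A null check whose offset
// falls inside the implicit-check range typically emits no code and relies on the
// hardware fault from the subsequent access:
//
//   __ null_check(obj_reg, oopDesc::klass_offset_in_bytes());
//   __ load_klass(dst_reg, obj_reg, tmp_reg);   // may take the implicit fault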
96
97 // Required platform-specific helpers for Label::patch_instructions.
98 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
99 void pd_patch_instruction(address branch, address target, const char* file, int line) {
100 unsigned char op = branch[0];
101 assert(op == 0xE8 /* call */ ||
102 op == 0xE9 /* jmp */ ||
103 op == 0xEB /* short jmp */ ||
104 (op & 0xF0) == 0x70 /* short jcc */ ||
105 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
106 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
107 (op == 0x8D) /* lea */,
108 "Invalid opcode at patch point");
109
110 if (op == 0xEB || (op & 0xF0) == 0x70) {
111 // short offset operators (jmp and jcc)
112 char* disp = (char*) &branch[1];
113 int imm8 = checked_cast<int>(target - (address) &disp[1]);
114 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
115 file == nullptr ? "<null>" : file, line);
116 *disp = (char)imm8;
117 } else {
118 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
119 int imm32 = checked_cast<int>(target - (address) &disp[1]);
120 *disp = imm32;
121 }
122 }
123
124 // The following 4 methods return the offset of the appropriate move instruction
125
126 // Support for fast byte/short loading with zero extension (depending on particular CPU)
127 int load_unsigned_byte(Register dst, Address src);
128 int load_unsigned_short(Register dst, Address src);
129
130 // Support for fast byte/short loading with sign extension (depending on particular CPU)
131 int load_signed_byte(Register dst, Address src);
132 int load_signed_short(Register dst, Address src);
133
134 // Support for sign-extension (hi:lo = extend_sign(lo))
135 void extend_sign(Register hi, Register lo);
136
137 // Load and store values by size and signed-ness
138 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
139 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
140
141 // Support for inc/dec with optimal instruction selection depending on value
142
143 void increment(Register reg, int value = 1) { incrementq(reg, value); }
144 void decrement(Register reg, int value = 1) { decrementq(reg, value); }
145 void increment(Address dst, int value = 1) { incrementq(dst, value); }
146 void decrement(Address dst, int value = 1) { decrementq(dst, value); }
147
148 void decrementl(Address dst, int value = 1);
149 void decrementl(Register reg, int value = 1);
150
151 void decrementq(Register reg, int value = 1);
152 void decrementq(Address dst, int value = 1);
153
154 void incrementl(Address dst, int value = 1);
155 void incrementl(Register reg, int value = 1);
156
157 void incrementq(Register reg, int value = 1);
158 void incrementq(Address dst, int value = 1);
159
160 void incrementl(AddressLiteral dst, Register rscratch = noreg);
161 void incrementl(ArrayAddress dst, Register rscratch);
162
163 void incrementq(AddressLiteral dst, Register rscratch = noreg);
164
165 // Support optimal SSE move instructions.
166 void movflt(XMMRegister dst, XMMRegister src) {
167 if (dst->encoding() == src->encoding()) return;
168 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
169 else { movss (dst, src); return; }
170 }
171 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
172 void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
173 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
174
175 // Move with zero extension
176 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
177
178 void movdbl(XMMRegister dst, XMMRegister src) {
179 if (dst->encoding() == src->encoding()) return;
180 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
181 else { movsd (dst, src); return; }
182 }
183
184 void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
185
186 void movdbl(XMMRegister dst, Address src) {
187 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
188 else { movlpd(dst, src); return; }
189 }
190 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
191
192 void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
193 // Use a separate tmp XMM register because the caller may
194 // require the src XMM register to be unchanged (as in x86.ad).
195 vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
196 movdl(dst, tmp);
197 movswl(dst, dst);
198 }
199
200 void flt16_to_flt(XMMRegister dst, Register src) {
201 movdl(dst, src);
202 vcvtph2ps(dst, dst, Assembler::AVX_128bit);
203 }
204
205 // Alignment
206 void align32();
207 void align64();
208 void align(uint modulus);
209 void align(uint modulus, uint target);
210
211 void post_call_nop();
212
213 // Stack frame creation/removal
214 void enter();
215 void leave();
216
217 // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
218 // The pointer will be loaded into the thread register. This is a slow version that does a native call.
219 // Normally the JavaThread pointer is available in r15_thread; use that where possible.
220 void get_thread_slow(Register thread);
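// Illustrative sketch (hypothetical stub code): most generated code can address
// thread-local state through r15_thread directly; the slow call is for places where
// that register may not yet hold the JavaThread pointer.
//
//   __ get_thread_slow(r15_thread);                              // re-establish the thread register
//   __ movptr(Address(r15_thread, /* field offset */ 0), rax);   // the offset is a placeholder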
221
222 // Support for argument shuffling
223
224 // bias in bytes
225 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
226 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
227 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
228 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
229 void move_ptr(VMRegPair src, VMRegPair dst);
230 void object_move(OopMap* map,
231 int oop_handle_offset,
232 int framesize_in_slots,
233 VMRegPair src,
234 VMRegPair dst,
235 bool is_receiver,
236 int* receiver_offset);
237
238 // Support for VM calls
239 //
240 // It is imperative that all calls into the VM are handled via the call_VM macros.
241 // They make sure that the stack linkage is set up correctly. call_VM's correspond
242 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
243
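// A minimal usage sketch (illustrative; the runtime entry point and the argument
// register are borrowed from typical interpreter code, not mandated by this API):
//
//   __ call_VM(noreg,
//              CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
//              c_rarg1);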
244
245 void call_VM(Register oop_result,
246 address entry_point,
247 bool check_exceptions = true);
248 void call_VM(Register oop_result,
249 address entry_point,
250 Register arg_1,
251 bool check_exceptions = true);
252 void call_VM(Register oop_result,
253 address entry_point,
254 Register arg_1, Register arg_2,
255 bool check_exceptions = true);
256 void call_VM(Register oop_result,
257 address entry_point,
258 Register arg_1, Register arg_2, Register arg_3,
259 bool check_exceptions = true);
260
261 // Overloadings with last_Java_sp
262 void call_VM(Register oop_result,
263 Register last_java_sp,
264 address entry_point,
265 int number_of_arguments = 0,
266 bool check_exceptions = true);
267 void call_VM(Register oop_result,
268 Register last_java_sp,
269 address entry_point,
270 Register arg_1, bool
271 check_exceptions = true);
272 void call_VM(Register oop_result,
273 Register last_java_sp,
274 address entry_point,
275 Register arg_1, Register arg_2,
276 bool check_exceptions = true);
277 void call_VM(Register oop_result,
278 Register last_java_sp,
279 address entry_point,
280 Register arg_1, Register arg_2, Register arg_3,
281 bool check_exceptions = true);
282
283 void get_vm_result_oop(Register oop_result);
284 void get_vm_result_metadata(Register metadata_result);
285
286 // These always tightly bind to MacroAssembler::call_VM_base
287 // bypassing the virtual implementation
288 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
289 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
290 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
291 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
292 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
293
294 void call_VM_leaf0(address entry_point);
295 void call_VM_leaf(address entry_point,
296 int number_of_arguments = 0);
297 void call_VM_leaf(address entry_point,
298 Register arg_1);
299 void call_VM_leaf(address entry_point,
300 Register arg_1, Register arg_2);
301 void call_VM_leaf(address entry_point,
302 Register arg_1, Register arg_2, Register arg_3);
303
304 void call_VM_leaf(address entry_point,
305 Register arg_1, Register arg_2, Register arg_3, Register arg_4);
306
307 // These always tightly bind to MacroAssembler::call_VM_leaf_base
308 // bypassing the virtual implementation
309 void super_call_VM_leaf(address entry_point);
310 void super_call_VM_leaf(address entry_point, Register arg_1);
311 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
312 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
313 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
314
315 void set_last_Java_frame(Register last_java_sp,
316 Register last_java_fp,
317 address last_java_pc,
318 Register rscratch);
319
320 void set_last_Java_frame(Register last_java_sp,
321 Register last_java_fp,
322 Label &last_java_pc,
323 Register scratch);
324
325 void reset_last_Java_frame(bool clear_fp);
326
327 // jobjects
328 void clear_jobject_tag(Register possibly_non_local);
329 void resolve_jobject(Register value, Register tmp);
330 void resolve_global_jobject(Register value, Register tmp);
331
332 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
333 void c2bool(Register x);
334
335 // C++ bool manipulation
336
337 void movbool(Register dst, Address src);
338 void movbool(Address dst, bool boolconst);
339 void movbool(Address dst, Register src);
340 void testbool(Register dst);
341
342 void resolve_oop_handle(Register result, Register tmp);
343 void resolve_weak_handle(Register result, Register tmp);
344 void load_mirror(Register mirror, Register method, Register tmp);
345 void load_method_holder_cld(Register rresult, Register rmethod);
346
347 void load_method_holder(Register holder, Register method);
348
349 // oop manipulations
350 void load_narrow_klass_compact(Register dst, Register src);
351 void load_klass(Register dst, Register src, Register tmp);
352 void store_klass(Register dst, Register src, Register tmp);
353
354 // Compares the Klass pointer of an object to a given Klass (which might be narrow,
355 // depending on UseCompressedClassPointers).
356 void cmp_klass(Register klass, Register obj, Register tmp);
357
358 // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
359 // Uses tmp1 and tmp2 as temporary registers.
360 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
361
362 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
363 Register tmp1);
364 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
365 Register tmp1, Register tmp2, Register tmp3);
366
367 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
368 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
369 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
370 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
371
372 // Used for storing null. All other oop constants should be
373 // stored using routines that take a jobject.
374 void store_heap_oop_null(Address dst);
375
376 void store_klass_gap(Register dst, Register src);
377
378 // This dummy is to prevent a call to store_heap_oop from
379 // converting a zero (like null) into a Register by giving
380 // the compiler two choices it can't resolve
381
382 void store_heap_oop(Address dst, void* dummy);
383
384 void encode_heap_oop(Register r);
385 void decode_heap_oop(Register r);
386 void encode_heap_oop_not_null(Register r);
387 void decode_heap_oop_not_null(Register r);
388 void encode_heap_oop_not_null(Register dst, Register src);
389 void decode_heap_oop_not_null(Register dst, Register src);
390
391 void set_narrow_oop(Register dst, jobject obj);
392 void set_narrow_oop(Address dst, jobject obj);
393 void cmp_narrow_oop(Register dst, jobject obj);
394 void cmp_narrow_oop(Address dst, jobject obj);
395
396 void encode_klass_not_null(Register r, Register tmp);
397 void decode_klass_not_null(Register r, Register tmp);
398 void encode_and_move_klass_not_null(Register dst, Register src);
399 void decode_and_move_klass_not_null(Register dst, Register src);
400 void set_narrow_klass(Register dst, Klass* k);
401 void set_narrow_klass(Address dst, Klass* k);
402 void cmp_narrow_klass(Register dst, Klass* k);
403 void cmp_narrow_klass(Address dst, Klass* k);
404
405 // If the heap base register is used, reinitialize it with the correct value.
406 void reinit_heapbase();
407
408 DEBUG_ONLY(void verify_heapbase(const char* msg);)
409
410 // Int division/remainder for Java
411 // (as idivl, but checks for special case as described in JVM spec.)
412 // returns idivl instruction offset for implicit exception handling
413 int corrected_idivl(Register reg);
414
415 // Long division/remainder for Java
416 // (as idivq, but checks for special case as described in JVM spec.)
417 // returns idivq instruction offset for implicit exception handling
418 int corrected_idivq(Register reg);
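// The JVM-spec special case handled above is signed-division overflow: min_jint / -1
// (and min_jlong / -1 for the long form) must yield min_jint (min_jlong) with a
// remainder of 0 instead of letting idiv raise a divide error. Sketch of the idea,
// not the emitted instruction sequence:
//
//   if (dividend == min_jint && divisor == -1) { quotient = min_jint; remainder = 0; }
//   else                                       { /* plain idivl */ }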
419
420 void int3();
421
422 // Long operation macros for a 32bit cpu
423 // Long negation for Java
424 void lneg(Register hi, Register lo);
425
426 // Long multiplication for Java
427 // (destroys contents of eax, ebx, ecx and edx)
428 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
429
430 // Long shifts for Java
431 // (semantics as described in JVM spec.)
432 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
433 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
434
435 // Long compare for Java
436 // (semantics as described in JVM spec.)
437 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
438
439
440 // misc
441
442 // Sign extension
443 void sign_extend_short(Register reg);
444 void sign_extend_byte(Register reg);
445
446 // Division by power of 2, rounding towards 0
447 void division_with_shift(Register reg, int shift_value);
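// Worked example of why a bias is needed for negative dividends: an arithmetic shift
// rounds toward negative infinity, while Java division rounds toward zero. For -7 / 4,
// (-7 >> 2) == -2 but Java requires -1; adding (2^shift - 1) to negative dividends
// first gives ((-7 + 3) >> 2) == -1. (Illustration of the general technique, not a
// transcript of the emitted code.)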
448
449 // dst = c = a * b + c
450 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
451 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
452
453 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
454 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
455 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
456 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
457
458
459 // same as fcmp2int, but using SSE2
460 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
461 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
462
463 void push_IU_state();
464 void pop_IU_state();
465
466 void push_FPU_state();
467 void pop_FPU_state();
468
469 void push_CPU_state();
470 void pop_CPU_state();
471
472 void push_cont_fastpath();
473 void pop_cont_fastpath();
474
475 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
476
477 // Round up to a multiple of modulus (which must be a power of two)
478 void round_to(Register reg, int modulus);
479
480 private:
481 // General purpose and XMM registers potentially clobbered by native code; there
482 // is no need for FPU or AVX opmask related methods because C1 and the interpreter
483 // - always save/restore the FPU state as a whole
484 // - do not care about the AVX-512 opmask
485 static RegSet call_clobbered_gp_registers();
486 static XMMRegSet call_clobbered_xmm_registers();
487
488 void push_set(XMMRegSet set, int offset);
489 void pop_set(XMMRegSet set, int offset);
490
491 public:
492 void push_set(RegSet set, int offset = -1);
493 void pop_set(RegSet set, int offset = -1);
494
495 // Push and pop everything that might be clobbered by a native
496 // runtime call.
497 // Only save the lower 64 bits of each vector register.
498 // Additional registers can be excluded in a passed RegSet.
499 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
500 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
501
502 void push_call_clobbered_registers(bool save_fpu = true) {
503 push_call_clobbered_registers_except(RegSet(), save_fpu);
504 }
505 void pop_call_clobbered_registers(bool restore_fpu = true) {
506 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
507 }
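// Illustrative pairing (hypothetical call site; the excluded set and the leaf entry
// point are placeholders): spill everything a native call may clobber, keeping the
// register that carries the result live across the call.
//
//   __ push_call_clobbered_registers_except(RegSet::of(rax));
//   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_runtime_helper), c_rarg0);   // hypothetical helper
//   __ pop_call_clobbered_registers_except(RegSet::of(rax));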
508
509 // allocation
510 void tlab_allocate(
511 Register obj, // result: pointer to object after successful allocation
512 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
513 int con_size_in_bytes, // object size in bytes if known at compile time
514 Register t1, // temp register
515 Register t2, // temp register
516 Label& slow_case // continuation point if fast allocation fails
517 );
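// Sketch of a typical fast-path allocation (labels, registers and the elided
// initialization are placeholders):
//
//   Label slow_case, done;
//   __ tlab_allocate(obj, noreg, instance_size, t1, t2, slow_case);
//   // ... install mark word / klass, zero the fields ...
//   __ jmp(done);
//   __ bind(slow_case);   // fall into the runtime allocation path
//   __ bind(done);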
518 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
519
520 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
521
522 // interface method calling
523 void lookup_interface_method(Register recv_klass,
524 Register intf_klass,
525 RegisterOrConstant itable_index,
526 Register method_result,
527 Register scan_temp,
528 Label& no_such_interface,
529 bool return_method = true);
530
531 void lookup_interface_method_stub(Register recv_klass,
532 Register holder_klass,
533 Register resolved_klass,
534 Register method_result,
535 Register scan_temp,
536 Register temp_reg2,
537 Register receiver,
538 int itable_index,
539 Label& L_no_such_interface);
540
541 // virtual method calling
542 void lookup_virtual_method(Register recv_klass,
543 RegisterOrConstant vtable_index,
544 Register method_result);
545
546 // Test sub_klass against super_klass, with fast and slow paths.
547
548 // The fast path produces a tri-state answer: yes / no / maybe-slow.
549 // One of the three labels can be null, meaning take the fall-through.
550 // If super_check_offset is -1, the value is loaded up from super_klass.
551 // No registers are killed, except temp_reg.
552 void check_klass_subtype_fast_path(Register sub_klass,
553 Register super_klass,
554 Register temp_reg,
555 Label* L_success,
556 Label* L_failure,
557 Label* L_slow_path,
558 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
559
560 // The rest of the type check; must be wired to a corresponding fast path.
561 // It does not repeat the fast path logic, so don't use it standalone.
562 // The temp_reg and temp2_reg can be noreg, if no temps are available.
563 // Updates the sub's secondary super cache as necessary.
564 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
565 void check_klass_subtype_slow_path(Register sub_klass,
566 Register super_klass,
567 Register temp_reg,
568 Register temp2_reg,
569 Label* L_success,
570 Label* L_failure,
571 bool set_cond_codes = false);
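// Typical pairing of the two parts (illustrative; labels, registers and the error
// handling are placeholders):
//
//   Label ok, not_ok, slow;
//   __ check_klass_subtype_fast_path(sub, super, tmp, &ok, &not_ok, &slow);
//   __ bind(slow);
//   __ check_klass_subtype_slow_path(sub, super, tmp, noreg, &ok, &not_ok);
//   __ bind(not_ok);   // ... raise the failure (throw/deoptimize) ...
//   __ bind(ok);       // subtype relationship established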
572
573 // The 64-bit version, which may do a hashed subclass lookup.
574 void check_klass_subtype_slow_path(Register sub_klass,
575 Register super_klass,
576 Register temp_reg,
577 Register temp2_reg,
578 Register temp3_reg,
579 Register temp4_reg,
580 Label* L_success,
581 Label* L_failure);
582
583 // Three parts of a hashed subclass lookup: a simple linear search,
584 // a table lookup, and a fallback that does linear probing in the
585 // event of a hash collision.
586 void check_klass_subtype_slow_path_linear(Register sub_klass,
587 Register super_klass,
588 Register temp_reg,
589 Register temp2_reg,
590 Label* L_success,
591 Label* L_failure,
592 bool set_cond_codes = false);
593 void check_klass_subtype_slow_path_table(Register sub_klass,
594 Register super_klass,
595 Register temp_reg,
596 Register temp2_reg,
597 Register temp3_reg,
598 Register result_reg,
599 Label* L_success,
600 Label* L_failure);
601 void hashed_check_klass_subtype_slow_path(Register sub_klass,
602 Register super_klass,
603 Register temp_reg,
604 Label* L_success,
605 Label* L_failure);
606
607 // As above, but with a constant super_klass.
608 // The result is in Register result, not the condition codes.
609 void lookup_secondary_supers_table_const(Register sub_klass,
610 Register super_klass,
611 Register temp1,
612 Register temp2,
613 Register temp3,
614 Register temp4,
615 Register result,
616 u1 super_klass_slot);
617
618 using Assembler::salq;
619 void salq(Register dest, Register count);
620 using Assembler::rorq;
621 void rorq(Register dest, Register count);
622 void lookup_secondary_supers_table_var(Register sub_klass,
623 Register super_klass,
624 Register temp1,
625 Register temp2,
626 Register temp3,
627 Register temp4,
628 Register result);
629
630 void lookup_secondary_supers_table_slow_path(Register r_super_klass,
631 Register r_array_base,
632 Register r_array_index,
633 Register r_bitmap,
634 Register temp1,
635 Register temp2,
636 Label* L_success,
637 Label* L_failure = nullptr);
638
639 void verify_secondary_supers_table(Register r_sub_klass,
640 Register r_super_klass,
641 Register expected,
642 Register temp1,
643 Register temp2,
644 Register temp3);
645
646 void repne_scanq(Register addr, Register value, Register count, Register limit,
647 Label* L_success,
648 Label* L_failure = nullptr);
649
650 // If r is valid, return r.
651 // If r is invalid, remove a register r2 from available_regs, add r2
652 // to regs_to_push, then return r2.
653 Register allocate_if_noreg(const Register r,
654 RegSetIterator<Register> &available_regs,
655 RegSet &regs_to_push);
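// Sketch of the intended pattern (hypothetical caller code; the register sets are
// assumptions): a caller handed noreg for an optional temp grabs a register, spills
// it, and restores it afterwards.
//
//   RegSet to_push;
//   RegSetIterator<Register> available = (caller_saved_regs - fixed_regs).begin();
//   temp = allocate_if_noreg(temp, available, to_push);
//   push_set(to_push);
//   // ... use temp ...
//   pop_set(to_push);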
656
657 // Simplified, combined version, good for typical uses.
658 // Falls through on failure.
659 void check_klass_subtype(Register sub_klass,
660 Register super_klass,
661 Register temp_reg,
662 Label& L_success);
663
664 void clinit_barrier(Register klass,
665 Label* L_fast_path = nullptr,
666 Label* L_slow_path = nullptr);
667
668 // method handles (JSR 292)
669 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
670
671 // Debugging
672
673 // only if +VerifyOops
674 void _verify_oop(Register reg, const char* s, const char* file, int line);
675 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
676
677 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
678 if (VerifyOops) {
679 _verify_oop(reg, s, file, line);
680 }
681 }
682 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
683 if (VerifyOops) {
684 _verify_oop_addr(reg, s, file, line);
685 }
686 }
687
688 // TODO: verify method and klass metadata (compare against vptr?)
689 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
690 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
691
692 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
693 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
694 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
695 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
696 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
697
698 // Verify or restore cpu control state after JNI call
699 void restore_cpu_control_state_after_jni(Register rscratch);
700
701 // prints msg, dumps registers and stops execution
702 void stop(const char* msg);
703
704 // prints msg and continues
705 void warn(const char* msg);
706
707 // dumps registers and other state
708 void print_state();
709
710 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
711 static void debug64(char* msg, int64_t pc, int64_t regs[]);
712 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
713 static void print_state64(int64_t pc, int64_t regs[]);
714
715 void os_breakpoint();
716
717 void untested() { stop("untested"); }
718
719 void unimplemented(const char* what = "");
720
721 void should_not_reach_here() { stop("should not reach here"); }
722
723 void print_CPU_state();
724
725 // Stack overflow checking
726 void bang_stack_with_offset(int offset) {
727 // stack grows down; the caller passes a positive offset and we bang at rsp - offset
728 assert(offset > 0, "must bang with positive offset");
729 movl(Address(rsp, (-offset)), rax);
730 }
731
732 // Writes to successive stack pages until the given size is reached, to check for
733 // stack overflow plus shadow pages. Also clobbers tmp.
734 void bang_stack_size(Register size, Register tmp);
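// Conceptually the size-based bang touches one word per page down the stack, covering
// the requested size plus the shadow pages (a sketch of the idea, not the emitted loop):
//
//   for (int off = page_size; off <= size + shadow_zone_size; off += page_size) {
//     movl(Address(rsp, -off), rax);
//   }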
735
736 // Check for reserved stack access in method being exited (for JIT)
737 void reserved_stack_check();
738
739 void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
740
741 void verify_tlab();
742
743 static Condition negate_condition(Condition cond);
744
745 // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
746 // operands. In general the names are modified to avoid hiding the instruction in Assembler,
747 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
748 // here in MacroAssembler. The major exception to this rule is call.
749
750 // Arithmetics
751
752
753 void addptr(Address dst, int32_t src) { addq(dst, src); }
754 void addptr(Address dst, Register src);
755
756 void addptr(Register dst, Address src) { addq(dst, src); }
757 void addptr(Register dst, int32_t src);
758 void addptr(Register dst, Register src);
759 void addptr(Register dst, RegisterOrConstant src) {
760 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
761 else addptr(dst, src.as_register());
762 }
763
764 void andptr(Register dst, int32_t src);
765 void andptr(Register src1, Register src2) { andq(src1, src2); }
766
767 using Assembler::andq;
768 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
769
770 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
771
772 // renamed to drag out the casting of address to int32_t/intptr_t
773 void cmp32(Register src1, int32_t imm);
774
775 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
776 // compare reg - mem, or reg - &mem
777 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
778
779 void cmp32(Register src1, Address src2);
780
781 void cmpoop(Register src1, Register src2);
782 void cmpoop(Register src1, Address src2);
783 void cmpoop(Register dst, jobject obj, Register rscratch);
784
785 // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
786 void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
787
788 void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
789
790 void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
791 void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }
792
793 void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
794 void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }
795
796 // cmp64 to avoid hiding cmpq
797 void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
798
799 void cmpxchgptr(Register reg, Address adr);
800
801 void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
802
803 void imulptr(Register dst, Register src) { imulq(dst, src); }
804 void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }
805
806
807 void negptr(Register dst) { negq(dst); }
808
809 void notptr(Register dst) { notq(dst); }
810
811 void shlptr(Register dst, int32_t shift);
812 void shlptr(Register dst) { shlq(dst); }
813
814 void shrptr(Register dst, int32_t shift);
815 void shrptr(Register dst) { shrq(dst); }
816
817 void sarptr(Register dst) { sarq(dst); }
818 void sarptr(Register dst, int32_t src) { sarq(dst, src); }
819
820 void subptr(Address dst, int32_t src) { subq(dst, src); }
821
822 void subptr(Register dst, Address src) { subq(dst, src); }
823 void subptr(Register dst, int32_t src);
824 // Force generation of a 4-byte immediate value even if it fits into 8 bits
825 void subptr_imm32(Register dst, int32_t src);
826 void subptr(Register dst, Register src);
827 void subptr(Register dst, RegisterOrConstant src) {
828 if (src.is_constant()) subptr(dst, (int) src.as_constant());
829 else subptr(dst, src.as_register());
830 }
831
832 void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
833 void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }
834
835 void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
836 void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }
837
838 void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
839
840
841
842 // Helper functions for statistics gathering.
843 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
844 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
845 // Unconditional atomic increment.
846 void atomic_incl(Address counter_addr);
847 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
848 void atomic_incq(Address counter_addr);
849 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
850 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
851 void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
852
853 using Assembler::lea;
854 void lea(Register dst, AddressLiteral adr);
855 void lea(Address dst, AddressLiteral adr, Register rscratch);
856
857 void leal32(Register dst, Address src) { leal(dst, src); }
858
859 // Import other testl() methods from the parent class or else
860 // they will be hidden by the following overriding declaration.
861 using Assembler::testl;
862 void testl(Address dst, int32_t imm32);
863 void testl(Register dst, int32_t imm32);
864 void testl(Register dst, AddressLiteral src); // requires reachable address
865 using Assembler::testq;
866 void testq(Address dst, int32_t imm32);
867 void testq(Register dst, int32_t imm32);
868
869 void orptr(Register dst, Address src) { orq(dst, src); }
870 void orptr(Register dst, Register src) { orq(dst, src); }
871 void orptr(Register dst, int32_t src) { orq(dst, src); }
872 void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }
873
874 void testptr(Register src, int32_t imm32) { testq(src, imm32); }
875 void testptr(Register src1, Address src2) { testq(src1, src2); }
876 void testptr(Address src, int32_t imm32) { testq(src, imm32); }
877 void testptr(Register src1, Register src2);
878
879 void xorptr(Register dst, Register src) { xorq(dst, src); }
880 void xorptr(Register dst, Address src) { xorq(dst, src); }
881
882 // Calls
883
884 void call(Label& L, relocInfo::relocType rtype);
885 void call(Register entry);
886 void call(Address addr) { Assembler::call(addr); }
887
888 // NOTE: this call transfers to the effective address of entry, NOT
889 // the address contained by entry, because that is more natural
890 // for jumps/calls.
891 void call(AddressLiteral entry, Register rscratch = rax);
892
893 // Emit the CompiledIC call idiom
894 void ic_call(address entry, jint method_index = 0);
895 static int ic_check_size();
896 int ic_check(int end_alignment);
897
898 void emit_static_call_stub();
899
900 // Jumps
901
902 // NOTE: these jumps transfer to the effective address of dst, NOT
903 // the address contained by dst, because that is more natural
904 // for jumps/calls.
905 void jump(AddressLiteral dst, Register rscratch = noreg);
906
907 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
908
909 // 32bit can do a case table jump in one instruction, but we no longer allow the base
910 // to be installed in the Address class. This jump will transfer to the address
911 // contained in the location described by entry (not the address of entry).
912 void jump(ArrayAddress entry, Register rscratch);
913
914 // Adding more natural conditional jump instructions
915 void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
916 void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
917 void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
918 void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
919 void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
920 void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
921 void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
922 void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
923 void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
924 void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
925 void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
926 void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
927 void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
928 void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
929 void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
930 void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
931 void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
932 void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
933 void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
934 void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
935 void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
936 void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
937 void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
938 void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
939 void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
940 void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
941 void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
942 void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
943 void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
944 void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
945 // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
946 // * No condition for this * void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
947
948 // Short versions of the above
949 void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
950 void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
951 void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
952 void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
953 void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
954 void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
955 void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
956 void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
957 void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
958 void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
959 void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
960 void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
961 void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
962 void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
963 void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
964 void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
965 void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
966 void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
967 void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
968 void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
969 void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
970 void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
971 void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
972 void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
973 void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
974 void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
975 void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
976 void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
977 void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
978 void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
979 // * No condition for this * void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
980 // * No condition for this * void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
981
982 // Floating
983
984 void push_f(XMMRegister r);
985 void pop_f(XMMRegister r);
986 void push_d(XMMRegister r);
987 void pop_d(XMMRegister r);
988
989 void push_ppx(Register src);
990 void pop_ppx(Register dst);
991
992 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
993 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
994 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
995
996 void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
997
998 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
999 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
1000 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1001
1002 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
1003 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
1004 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1005
1006 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
1007 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
1008 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1009
1010 void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1011
1012 void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1013 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1014 void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1015
1016 private:
1017 void sha256_AVX2_one_round_compute(
1018 Register reg_old_h,
1019 Register reg_a,
1020 Register reg_b,
1021 Register reg_c,
1022 Register reg_d,
1023 Register reg_e,
1024 Register reg_f,
1025 Register reg_g,
1026 Register reg_h,
1027 int iter);
1028 void sha256_AVX2_four_rounds_compute_first(int start);
1029 void sha256_AVX2_four_rounds_compute_last(int start);
1030 void sha256_AVX2_one_round_and_sched(
1031 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1032 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
1033 XMMRegister xmm_2, /* ymm6 */
1034 XMMRegister xmm_3, /* ymm7 */
1035 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
1036 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
1037 Register reg_c, /* edi */
1038 Register reg_d, /* esi */
1039 Register reg_e, /* r8d */
1040 Register reg_f, /* r9d */
1041 Register reg_g, /* r10d */
1042 Register reg_h, /* r11d */
1043 int iter);
1044
1045 void addm(int disp, Register r1, Register r2);
1046
1047 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1048 Register e, Register f, Register g, Register h, int iteration);
1049
1050 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1051 Register a, Register b, Register c, Register d, Register e, Register f,
1052 Register g, Register h, int iteration);
1053
1054 void addmq(int disp, Register r1, Register r2);
1055 public:
1056 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1057 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1058 Register buf, Register state, Register ofs, Register limit, Register rsp,
1059 bool multi_block, XMMRegister shuf_mask);
1060 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1061 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1062 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1063 XMMRegister shuf_mask);
1064 void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1065
1066 void fast_md5(Register buf, Address state, Address ofs, Address limit,
1067 bool multi_block);
1068
1069 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1070 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1071 Register buf, Register state, Register ofs, Register limit, Register rsp,
1072 bool multi_block);
1073
1074 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1075 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1076 Register buf, Register state, Register ofs, Register limit, Register rsp,
1077 bool multi_block, XMMRegister shuf_mask);
1078
1079 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1080 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1081 Register rax, Register rcx, Register rdx, Register tmp);
1082
1083 private:
1084
1085 // these are private because users should be using movflt/movdbl
1086
1087 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1088 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1089 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1090 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1091
1092 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1093 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1094
1095 public:
1096
1097 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1098 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1099 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1100
1101 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1102 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1103 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1104
1105 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1106 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1107 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1108
1109 using Assembler::vbroadcasti128;
1110 void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1111
1112 using Assembler::vbroadcastsd;
1113 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1114
1115 using Assembler::vbroadcastss;
1116 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1117
1118 // Vector float blend
1119 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1120 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1121
1122 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1123 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1124 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1125
1126 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1127 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1128 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1129
1130 // Move Unaligned Double Quadword
1131 void movdqu(Address dst, XMMRegister src);
1132 void movdqu(XMMRegister dst, XMMRegister src);
1133 void movdqu(XMMRegister dst, Address src);
1134 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1135
1136 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1137 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1138 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1139 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1140 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1141 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1142
1143 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1144 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1145 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1146 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1147 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1148 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1149
1150 // Safe move operation: lowers to 16-bit moves for targets supporting the
1151 // AVX512F feature and to 64-bit moves for targets supporting the AVX512BW feature.
1152 void kmov(Address dst, KRegister src);
1153 void kmov(KRegister dst, Address src);
1154 void kmov(KRegister dst, KRegister src);
1155 void kmov(Register dst, KRegister src);
1156 void kmov(KRegister dst, Register src);
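// Sketch of the dispatch these helpers are expected to perform (illustrative; the
// feature test mirrors the comment above rather than quoting the implementation):
//
//   if (VM_Version::supports_avx512bw()) { kmovql(dst, src); }   // 64-bit mask move
//   else                                 { kmovwl(dst, src); }   // 16-bit mask move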
1157
1158 using Assembler::movddup;
1159 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1160
1161 using Assembler::vmovddup;
1162 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1163
1164 // AVX Unaligned forms
1165 void vmovdqu(Address dst, XMMRegister src);
1166 void vmovdqu(XMMRegister dst, Address src);
1167 void vmovdqu(XMMRegister dst, XMMRegister src);
1168 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1169 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1170 void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
1171 void vmovdqu(XMMRegister dst, Address src, int vector_len);
1172 void vmovdqu(Address dst, XMMRegister src, int vector_len);
1173
1174 // AVX Aligned forms
1175 using Assembler::vmovdqa;
1176 void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1177 void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1178
1179 // AVX512 Unaligned
1180 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1181 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1182 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1183
1184 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1185 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1186
1187 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1188 if (dst->encoding() != src->encoding() || mask != k0) {
1189 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1190 }
1191 }
1192 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1193 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1194 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1195
1196 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1197 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1198 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1199
1200 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1201 if (dst->encoding() != src->encoding() || mask != k0) {
1202 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1203 }
1204 }
1205 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1206 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1207 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1208
1209 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1210 if (dst->encoding() != src->encoding()) {
1211 Assembler::evmovdqul(dst, src, vector_len);
1212 }
1213 }
1214 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1215 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1216
1217 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1218 if (dst->encoding() != src->encoding() || mask != k0) {
1219 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1220 }
1221 }
1222 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1223 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1224 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1225
1226 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1227 if (dst->encoding() != src->encoding()) {
1228 Assembler::evmovdquq(dst, src, vector_len);
1229 }
1230 }
1231 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1232 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1233 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1234 void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1235
1236 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1237 if (dst->encoding() != src->encoding() || mask != k0) {
1238 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1239 }
1240 }
1241 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1242 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1243 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1244 void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1245
1246 using Assembler::movapd;
1247 void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1248
1249 // Move Aligned Double Quadword
1250 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1251 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1252 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1253
1254 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1255 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1256 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1257 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1258
1259 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1260 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1261 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1262
1263 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1264 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1265 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1266
1267 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1268 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1269 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1270
1271 // Carry-Less Multiplication Quadword
1272 void pclmulldq(XMMRegister dst, XMMRegister src) {
1273 // 0x00 - multiply lower 64 bits [0:63]
1274 Assembler::pclmulqdq(dst, src, 0x00);
1275 }
1276 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1277 // 0x11 - multiply upper 64 bits [64:127]
1278 Assembler::pclmulqdq(dst, src, 0x11);
1279 }
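  // The imm8 of pclmulqdq selects which 64-bit halves are multiplied: bit 0 picks the
  // low (0) or high (1) qword of dst, bit 4 picks the low or high qword of src, so
  // 0x00 is dst[0:63] * src[0:63] and 0x11 is dst[64:127] * src[64:127].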
1280
1281 void pcmpeqb(XMMRegister dst, XMMRegister src);
1282 void pcmpeqw(XMMRegister dst, XMMRegister src);
1283
1284 void pcmpestri(XMMRegister dst, Address src, int imm8);
1285 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1286
1287 void pmovzxbw(XMMRegister dst, XMMRegister src);
1288 void pmovzxbw(XMMRegister dst, Address src);
1289
1290 void pmovmskb(Register dst, XMMRegister src);
1291
1292 void ptest(XMMRegister dst, XMMRegister src);
1293
1294 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1295 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1296 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1297
1298 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1299 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1300 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1301
1302 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1303 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1304 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1305
1306 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1307 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1308 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1309
1310 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1311 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1312 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1313
1314 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1315 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1316 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1317
1318 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1319 void xorpd(XMMRegister dst, XMMRegister src);
1320 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1321 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1322
1323 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1324 void xorps(XMMRegister dst, XMMRegister src);
1325 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1326 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1327
1328 // Shuffle Bytes
1329 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1330 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1331 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1332   // AVX 3-operand instructions
1333
1334 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1335 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1336 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1337
1338 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1339 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1340 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1341
1342 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1343 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1344
1345 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1346 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1347 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1348
1349 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1350 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1351
1352 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1353 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1354 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1355
1356 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1357 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1358 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1359
1360 using Assembler::vpbroadcastd;
1361 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1362
1363 using Assembler::vpbroadcastq;
1364 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1365
1366 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1367 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1368
1369 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1370 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1371 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1372
1373 // Vector compares
1374 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1375 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1376 }
1377 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1378
1379 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1380 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1381 }
1382 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1383
1384 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1385 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1386 }
1387 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1388
1389 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1390 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1391 }
1392 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1393
1394 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1395
1396 // Emit comparison instruction for the specified comparison predicate.
1397 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1398 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1399
1400 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1401 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1402
1403 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1404
1405 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1406 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1407
1408 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1409 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1410 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1411
1412 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1413
1414 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1415 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1416
1417 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1418 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1419
1420 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1421 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1422
1423 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1424 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1425
1426 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1427 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1428
1429 using Assembler::evpsllw;
1430 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1431 if (!is_varshift) {
1432 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1433 } else {
1434 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1435 }
1436 }
1437 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1438 if (!is_varshift) {
1439 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1440 } else {
1441 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1442 }
1443 }
1444 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1445 if (!is_varshift) {
1446 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1447 } else {
1448 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1449 }
1450 }
1451 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1452 if (!is_varshift) {
1453 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1454 } else {
1455 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1456 }
1457 }
1458 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1459 if (!is_varshift) {
1460 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1461 } else {
1462 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1463 }
1464 }
1465
1466 using Assembler::evpsrlq;
1467 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1468 if (!is_varshift) {
1469 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1470 } else {
1471 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1472 }
1473 }
1474 using Assembler::evpsraw;
1475 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1476 if (!is_varshift) {
1477 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1478 } else {
1479 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1480 }
1481 }
1482 using Assembler::evpsrad;
1483 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1484 if (!is_varshift) {
1485 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1486 } else {
1487 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1488 }
1489 }
1490 using Assembler::evpsraq;
1491 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1492 if (!is_varshift) {
1493 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1494 } else {
1495 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1496 }
1497 }
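  // In the shift wrappers above, 'is_varshift' selects the variable-count forms
  // (vpsllv*/vpsrlv*/vpsrav*), where each lane is shifted by the count held in the
  // corresponding lane of 'src'; the plain forms shift every lane by the single
  // count taken from the low 64 bits of 'src'.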
1498
1499 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1500 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1501 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1502 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1503
1504 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1505 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1506 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1507 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1508
1509 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1510 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1511
1512 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1513 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1514
1515 void vptest(XMMRegister dst, XMMRegister src);
1516 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1517
1518 void punpcklbw(XMMRegister dst, XMMRegister src);
1519 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1520
1521 void pshufd(XMMRegister dst, Address src, int mode);
1522 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1523
1524 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1525 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1526
1527 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1528 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1529 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1530
1531 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1532 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1533 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1534
1535 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1536
1537 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1538 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1539 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1540
1541 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1542 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1543 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1544
1545 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1546 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1547 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1548
1549 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1550 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1551 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1552
1553 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1554 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1555 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1556
1557 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1558 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1559 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1560
1561 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1562 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1563
1564 // AVX Vector instructions
1565
1566 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1567 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1568 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1569
1570 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1571 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1572 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1573
1574 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1575     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1576 Assembler::vpxor(dst, nds, src, vector_len);
1577 else
1578 Assembler::vxorpd(dst, nds, src, vector_len);
1579 }
1580 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1581     if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1582 Assembler::vpxor(dst, nds, src, vector_len);
1583 else
1584 Assembler::vxorpd(dst, nds, src, vector_len);
1585 }
1586 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1587
1588   // Simple versions for AVX2 256-bit vectors
1589 void vpxor(XMMRegister dst, XMMRegister src) {
1590 assert(UseAVX >= 2, "Should be at least AVX2");
1591 Assembler::vpxor(dst, dst, src, AVX_256bit);
1592 }
1593 void vpxor(XMMRegister dst, Address src) {
1594 assert(UseAVX >= 2, "Should be at least AVX2");
1595 Assembler::vpxor(dst, dst, src, AVX_256bit);
1596 }
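  // A common idiom (illustrative): vpxor(xtmp, xtmp) XORs a register with itself,
  // zeroing all 256 bits of the corresponding ymm register.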
1597
1598 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1599 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1600
1601 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1602 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1603 Assembler::vinserti32x4(dst, nds, src, imm8);
1604 } else if (UseAVX > 1) {
1605 // vinserti128 is available only in AVX2
1606 Assembler::vinserti128(dst, nds, src, imm8);
1607 } else {
1608 Assembler::vinsertf128(dst, nds, src, imm8);
1609 }
1610 }
1611
1612 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1613 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1614 Assembler::vinserti32x4(dst, nds, src, imm8);
1615 } else if (UseAVX > 1) {
1616 // vinserti128 is available only in AVX2
1617 Assembler::vinserti128(dst, nds, src, imm8);
1618 } else {
1619 Assembler::vinsertf128(dst, nds, src, imm8);
1620 }
1621 }
1622
1623 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1624 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1625 Assembler::vextracti32x4(dst, src, imm8);
1626 } else if (UseAVX > 1) {
1627 // vextracti128 is available only in AVX2
1628 Assembler::vextracti128(dst, src, imm8);
1629 } else {
1630 Assembler::vextractf128(dst, src, imm8);
1631 }
1632 }
1633
1634 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1635 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1636 Assembler::vextracti32x4(dst, src, imm8);
1637 } else if (UseAVX > 1) {
1638 // vextracti128 is available only in AVX2
1639 Assembler::vextracti128(dst, src, imm8);
1640 } else {
1641 Assembler::vextractf128(dst, src, imm8);
1642 }
1643 }
1644
1645 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1646 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1647 vinserti128(dst, dst, src, 1);
1648 }
1649 void vinserti128_high(XMMRegister dst, Address src) {
1650 vinserti128(dst, dst, src, 1);
1651 }
1652 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1653 vextracti128(dst, src, 1);
1654 }
1655 void vextracti128_high(Address dst, XMMRegister src) {
1656 vextracti128(dst, src, 1);
1657 }
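  // For example (illustrative): vextracti128_high(xmm1, xmm2) copies bits 255:128 of
  // ymm2 into xmm1, and vinserti128_high(xmm2, xmm1) writes them back.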
1658
1659 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1660 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1661 Assembler::vinsertf32x4(dst, dst, src, 1);
1662 } else {
1663 Assembler::vinsertf128(dst, dst, src, 1);
1664 }
1665 }
1666
1667 void vinsertf128_high(XMMRegister dst, Address src) {
1668 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1669 Assembler::vinsertf32x4(dst, dst, src, 1);
1670 } else {
1671 Assembler::vinsertf128(dst, dst, src, 1);
1672 }
1673 }
1674
1675 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1676 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1677 Assembler::vextractf32x4(dst, src, 1);
1678 } else {
1679 Assembler::vextractf128(dst, src, 1);
1680 }
1681 }
1682
1683 void vextractf128_high(Address dst, XMMRegister src) {
1684 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1685 Assembler::vextractf32x4(dst, src, 1);
1686 } else {
1687 Assembler::vextractf128(dst, src, 1);
1688 }
1689 }
1690
1691 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1692 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1693 Assembler::vinserti64x4(dst, dst, src, 1);
1694 }
1695 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1696 Assembler::vinsertf64x4(dst, dst, src, 1);
1697 }
1698 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1699 Assembler::vextracti64x4(dst, src, 1);
1700 }
1701 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1702 Assembler::vextractf64x4(dst, src, 1);
1703 }
1704 void vextractf64x4_high(Address dst, XMMRegister src) {
1705 Assembler::vextractf64x4(dst, src, 1);
1706 }
1707 void vinsertf64x4_high(XMMRegister dst, Address src) {
1708 Assembler::vinsertf64x4(dst, dst, src, 1);
1709 }
1710
1711 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1712 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1713 vinserti128(dst, dst, src, 0);
1714 }
1715 void vinserti128_low(XMMRegister dst, Address src) {
1716 vinserti128(dst, dst, src, 0);
1717 }
1718 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1719 vextracti128(dst, src, 0);
1720 }
1721 void vextracti128_low(Address dst, XMMRegister src) {
1722 vextracti128(dst, src, 0);
1723 }
1724
1725 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1726 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1727 Assembler::vinsertf32x4(dst, dst, src, 0);
1728 } else {
1729 Assembler::vinsertf128(dst, dst, src, 0);
1730 }
1731 }
1732
1733 void vinsertf128_low(XMMRegister dst, Address src) {
1734 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1735 Assembler::vinsertf32x4(dst, dst, src, 0);
1736 } else {
1737 Assembler::vinsertf128(dst, dst, src, 0);
1738 }
1739 }
1740
1741 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1742 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1743 Assembler::vextractf32x4(dst, src, 0);
1744 } else {
1745 Assembler::vextractf128(dst, src, 0);
1746 }
1747 }
1748
1749 void vextractf128_low(Address dst, XMMRegister src) {
1750 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1751 Assembler::vextractf32x4(dst, src, 0);
1752 } else {
1753 Assembler::vextractf128(dst, src, 0);
1754 }
1755 }
1756
1757 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1758 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1759 Assembler::vinserti64x4(dst, dst, src, 0);
1760 }
1761 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1762 Assembler::vinsertf64x4(dst, dst, src, 0);
1763 }
1764 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1765 Assembler::vextracti64x4(dst, src, 0);
1766 }
1767 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1768 Assembler::vextractf64x4(dst, src, 0);
1769 }
1770 void vextractf64x4_low(Address dst, XMMRegister src) {
1771 Assembler::vextractf64x4(dst, src, 0);
1772 }
1773 void vinsertf64x4_low(XMMRegister dst, Address src) {
1774 Assembler::vinsertf64x4(dst, dst, src, 0);
1775 }
1776
1777 // Carry-Less Multiplication Quadword
1778 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1779 // 0x00 - multiply lower 64 bits [0:63]
1780 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1781 }
1782 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1783 // 0x11 - multiply upper 64 bits [64:127]
1784 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1785 }
1786 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1787 // 0x10 - multiply nds[0:63] and src[64:127]
1788 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1789 }
1790 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1791     // 0x01 - multiply nds[64:127] and src[0:63]
1792 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1793 }
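  // Illustrative sketch (not a routine defined here): the four variants compose a
  // full 128x128 carry-less product:
  //   lo  = clmul(A[0:63],   B[0:63])                                  // 0x00
  //   mid = clmul(A[0:63],   B[64:127]) ^ clmul(A[64:127], B[0:63])    // 0x10 ^ 0x01
  //   hi  = clmul(A[64:127], B[64:127])                                // 0x11
  //   result = (hi << 128) ^ (mid << 64) ^ lo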
1794
1795 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1796 // 0x00 - multiply lower 64 bits [0:63]
1797 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1798 }
1799 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1800 // 0x11 - multiply upper 64 bits [64:127]
1801 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1802 }
1803
1804 // AVX-512 mask operations.
1805 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1806 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1807 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1808 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1809 void kortest(uint masklen, KRegister src1, KRegister src2);
1810 void ktest(uint masklen, KRegister src1, KRegister src2);
1811
1812 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1813 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1814
1815 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1816 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1817
1818 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1819 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1820
1821 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1822 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1823
1824 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1825 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1826 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1827 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1828
1829 using Assembler::evpandq;
1830 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1831
1832 using Assembler::evpaddq;
1833 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1834
1835 using Assembler::evporq;
1836 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1837
1838 using Assembler::vpshufb;
1839 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1840
1841 using Assembler::vpor;
1842 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1843
1844 using Assembler::vpternlogq;
1845 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1846
1847 void cmov32( Condition cc, Register dst, Address src);
1848 void cmov32( Condition cc, Register dst, Register src);
1849
1850 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1851
1852 void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); }
1853 void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1854
1855 void movoop(Register dst, jobject obj);
1856 void movoop(Address dst, jobject obj, Register rscratch);
1857
1858 void mov_metadata(Register dst, Metadata* obj);
1859 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1860
1861 void movptr(Register dst, Register src);
1862 void movptr(Register dst, Address src);
1863 void movptr(Register dst, AddressLiteral src);
1864 void movptr(Register dst, ArrayAddress src);
1865 void movptr(Register dst, intptr_t src);
1866 void movptr(Address dst, Register src);
1867 void movptr(Address dst, int32_t imm);
1868 void movptr(Address dst, intptr_t src, Register rscratch);
1869 void movptr(ArrayAddress dst, Register src, Register rscratch);
1870
1871 void movptr(Register dst, RegisterOrConstant src) {
1872 if (src.is_constant()) movptr(dst, src.as_constant());
1873 else movptr(dst, src.as_register());
1874 }
1875
1876
1877 // to avoid hiding movl
1878 void mov32(Register dst, AddressLiteral src);
1879 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1880
1881 // Import other mov() methods from the parent class or else
1882 // they will be hidden by the following overriding declaration.
1883 using Assembler::movdl;
1884 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1885
1886 using Assembler::movq;
1887 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1888
1889 // Can push value or effective address
1890 void pushptr(AddressLiteral src, Register rscratch);
1891
1892 void pushptr(Address src) { pushq(src); }
1893 void popptr(Address src) { popq(src); }
1894
1895 void pushoop(jobject obj, Register rscratch);
1896 void pushklass(Metadata* obj, Register rscratch);
1897
1898   // sign extend an 'l' (32-bit) value to a pointer-sized element as needed
1899 void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1900 void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1901
1902
1903 public:
1904 // clear memory of size 'cnt' qwords, starting at 'base';
1905   // if 'is_large' is set, do not try to produce a short loop
1906 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1907
1908   // clear memory of constant size 'cnt' qwords, starting at 'base', using an inline initialization sequence;
1909 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1910
1911 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1912 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
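  // Illustrative use (a sketch; the implementation may constrain the register choice,
  // e.g. for rep stos): clear_mem(rdi, rcx, rax, xmm0, /*is_large=*/false) zeroes
  // rcx qwords starting at [rdi].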
1913
1914 // Fill primitive arrays
1915 void generate_fill(BasicType t, bool aligned,
1916 Register to, Register value, Register count,
1917 Register rtmp, XMMRegister xtmp);
1918
1919 void encode_iso_array(Register src, Register dst, Register len,
1920 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1921 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1922
1923 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1924 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1925 Register y, Register y_idx, Register z,
1926 Register carry, Register product,
1927 Register idx, Register kdx);
1928 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1929 Register yz_idx, Register idx,
1930 Register carry, Register product, int offset);
1931 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1932 Register carry, Register carry2,
1933 Register idx, Register jdx,
1934 Register yz_idx1, Register yz_idx2,
1935 Register tmp, Register tmp3, Register tmp4);
1936 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1937 Register yz_idx, Register idx, Register jdx,
1938 Register carry, Register product,
1939 Register carry2);
1940 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
1941 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1942 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1943 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1944 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1945 Register tmp2);
1946 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1947 Register rdxReg, Register raxReg);
1948 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1949 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1950 Register tmp3, Register tmp4);
1951 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1952 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1953
1954 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1955 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1956 Register raxReg);
1957 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1958 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1959 Register raxReg);
1960 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1961 Register result, Register tmp1, Register tmp2,
1962 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1963
1964 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1965 void update_byte_crc32(Register crc, Register val, Register table);
1966 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1967
1968 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1969 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1970 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1971 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1972
1973 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1974 // Note on a naming convention:
1975 // Prefix w = register only used on a Westmere+ architecture
1976 // Prefix n = register only used on a Nehalem architecture
1977 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1978 Register tmp1, Register tmp2, Register tmp3);
1979 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1980 Register in_out,
1981 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1982 XMMRegister w_xtmp2,
1983 Register tmp1,
1984 Register n_tmp2, Register n_tmp3);
1985 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1986 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1987 Register tmp1, Register tmp2,
1988 Register n_tmp3);
1989 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1990 Register in_out1, Register in_out2, Register in_out3,
1991 Register tmp1, Register tmp2, Register tmp3,
1992 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1993 Register tmp4, Register tmp5,
1994 Register n_tmp6);
1995 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1996 Register tmp1, Register tmp2, Register tmp3,
1997 Register tmp4, Register tmp5, Register tmp6,
1998 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1999 bool is_pclmulqdq_supported);
2000 // Fold 128-bit data chunk
2001 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2002 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
2003 // Fold 512-bit data chunk
2004 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2005 // Fold 8-bit data
2006 void fold_8bit_crc32(Register crc, Register table, Register tmp);
2007 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
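  // In outline, these folding routines implement the PCLMULQDQ-based CRC technique:
  // the running remainder is carry-less multiplied by precomputed constants (powers of
  // x modulo the CRC polynomial) and XORed into the next chunk, reducing the buffer
  // 128 (or 512) bits at a time before a final Barrett reduction.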
2008
2009 // Compress char[] array to byte[].
2010 void char_array_compress(Register src, Register dst, Register len,
2011 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2012 XMMRegister tmp4, Register tmp5, Register result,
2013 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2014
2015 // Inflate byte[] array to char[].
2016 void byte_array_inflate(Register src, Register dst, Register len,
2017 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2018
2019 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2020 Register length, Register temp, int vec_enc);
2021
2022 void fill64_masked(uint shift, Register dst, int disp,
2023 XMMRegister xmm, KRegister mask, Register length,
2024 Register temp, bool use64byteVector = false);
2025
2026 void fill32_masked(uint shift, Register dst, int disp,
2027 XMMRegister xmm, KRegister mask, Register length,
2028 Register temp);
2029
2030 void fill32(Address dst, XMMRegister xmm);
2031
2032 void fill32(Register dst, int disp, XMMRegister xmm);
2033
2034 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2035
2036   void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2037
2038 void convert_f2i(Register dst, XMMRegister src);
2039 void convert_d2i(Register dst, XMMRegister src);
2040 void convert_f2l(Register dst, XMMRegister src);
2041 void convert_d2l(Register dst, XMMRegister src);
2042 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2043 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2044
2045 void cache_wb(Address line);
2046 void cache_wbsync(bool is_pre);
2047
2048 #ifdef COMPILER2_OR_JVMCI
2049 void generate_fill_avx3(BasicType type, Register to, Register value,
2050 Register count, Register rtmp, XMMRegister xtmp);
2051 #endif // COMPILER2_OR_JVMCI
2052
2053 void vallones(XMMRegister dst, int vector_len);
2054
2055 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2056
2057 void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2058 void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
2059
2060 void save_legacy_gprs();
2061 void restore_legacy_gprs();
2062 void load_aotrc_address(Register reg, address a);
2063 void setcc(Assembler::Condition comparison, Register dst);
2064 };
2065
2066 #endif // CPU_X86_MACROASSEMBLER_X86_HPP