/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1; // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int number_of_arguments     // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // call_VM_base returns the register which contains the thread upon return.
  // If no last_java_sp is specified (noreg) then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address entry_point,          // the entry point
    int number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool check_exceptions         // whether to check for pending exceptions after return
  );
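
  // Illustrative use of the call_VM contract (a sketch, not part of the
  // interface; `entry` names a hypothetical InterpreterRuntime entry point):
  //
  //   call_VM(rax,                              // oop result, if any, lands in rax
  //           CAST_FROM_FN_PTR(address, entry), // VM entry point
  //           rbx);                             // arg_1; exceptions are checked by default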

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe();
  virtual void check_and_handle_earlyret();

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
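
  // Illustrative sketch: for a small known offset the subsequent access
  // itself faults when reg is null, so no code is emitted; for an unknown
  // offset an explicit probe access is generated (offsets hypothetical):
  //
  //   null_check(rax, 8);  // implicit: the later load at [rax + 8] faults
  //   null_check(rax);     // explicit: emits an access through [rax + 0]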

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
           (op == 0x8D) /* lea */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
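
  // Illustrative sketch (field offsets hypothetical): a signed 2-byte load
  // versus an unsigned 1-byte load, dispatched by size and signedness:
  //
  //   load_sized_value(rax, Address(rbx, 8),  sizeof(jshort), /*is_signed=*/true);
  //   load_sized_value(rcx, Address(rbx, 10), sizeof(jbyte),  /*is_signed=*/false);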

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { incrementq(reg, value); }
  void decrement(Register reg, int value = 1) { decrementq(reg, value); }
  void increment(Address dst, int value = 1) { incrementq(dst, value); }
  void decrement(Address dst, int value = 1) { decrementq(dst, value); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  void movhlf(XMMRegister dst, XMMRegister src, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
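
  // Illustrative sketch: callers always write movflt/movdbl and the macro
  // picks the best encoding for the CPU (movaps/movapd vs. movss/movsd for
  // reg-reg moves, movsd vs. movlpd for loads):
  //
  //   movdbl(xmm0, xmm1);             // reg-reg move, encoding chosen by flags
  //   movdbl(xmm0, Address(rsp, 0));  // load; movsd or movlpd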

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }
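
  // Illustrative round trip (a sketch; register choices are arbitrary):
  //
  //   flt_to_flt16(rax, xmm0, xmm1);  // rax = sign-extended float16 of xmm0
  //   flt16_to_flt(xmm0, rax);        // xmm0 = float widened back from rax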

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register. This is a slow version that does a native call.
  // Normally, the JavaThread pointer is available in r15_thread; use that where possible.
  void get_thread_slow(Register thread);

  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result_oop(Register oop_result);
  void get_vm_result_metadata(Register metadata_result);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register tmp);
  void resolve_global_jobject(Register value, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_narrow_klass_compact(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the narrow Klass pointer of an object to a given narrow Klass.
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // If the heap base register is used, reinit it with the correct value.
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
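
  // The special case is min_jint / -1 (resp. min_jlong / -1), where a raw
  // idiv would trap even though the JVM spec defines the quotient as the
  // dividend itself and the remainder as 0. Illustrative sketch:
  //
  //   // dividend in rax, divisor in rcx (rdx is clobbered)
  //   int offset = corrected_idivq(rcx);  // rax = quotient, rdx = remainder
  //   // `offset` marks the idivq for implicit divide-by-zero handling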

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                              // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Clean up a subword typed value to the representation in compliance with JVMS §2.3
  void narrow_subword_type(Register reg, BasicType bt);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
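
  // A plain arithmetic right shift rounds toward negative infinity, while
  // Java division rounds toward zero, so negative dividends are adjusted
  // before the shift. Illustrative sketch:
  //
  //   division_with_shift(rax, 3);  // rax = rax / 8, rounding toward zero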

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round reg up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code;
  // no FPU or AVX opmask related methods are needed, because C1 and the interpreter
  // - always save/restore the FPU state as a whole
  // - do not care about the AVX-512 opmask registers
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }
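
  // Illustrative sketch around a native runtime call; rax is excluded from
  // the save set so the call's result survives the restore
  // (`some_leaf_fn` is a hypothetical leaf entry point):
  //
  //   push_call_clobbered_registers_except(RegSet::of(rax));
  //   call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_fn));
  //   pop_call_clobbered_registers_except(RegSet::of(rax));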

  // allocation
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label& slow_case            // continuation point if fast allocation fails
  );
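
  // Illustrative fast-path allocation (a sketch; `instance_size` is a
  // hypothetical compile-time constant):
  //
  //   Label slow_case, done;
  //   tlab_allocate(rax, noreg, instance_size, rbx, rcx, slow_case);
  //   // ... initialize header and fields of the new object in rax ...
  //   jmp(done);
  //   bind(slow_case);
  //   // ... allocate via a runtime call instead ...
  //   bind(done);
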
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
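
  // Illustrative pairing of the two parts (registers and labels arbitrary);
  // the fast path falls through to the slow path on its "maybe" answer:
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure,
  //                                 nullptr /* fall through to slow path */);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_success, nullptr);
  //   // falling through here means the subtype check failed
  //   bind(L_failure);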

  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);

  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);

  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  void profile_receiver_type(Register recv, Register mdp, int mdp_offset);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }
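
  // Illustrative sketch: bang one page beyond the frame being created so
  // the OS grows/checks the stack before the frame is used (`framesize`
  // is hypothetical; os::vm_page_size() is the platform page size):
  //
  //   bang_stack_with_offset(framesize + (int) os::vm_page_size());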

  // Writes to successive stack pages until the given offset is reached, to
  // check for stack overflow plus shadow pages. Clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetic


  void addptr(Address dst, int32_t src) { addq(dst, src); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { addq(dst, src); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { andq(src1, src2); }

  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
  void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }

  void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
  void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { imulq(dst, src); }
  void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }


  void negptr(Register dst) { negq(dst); }

  void notptr(Register dst) { notq(dst); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { shlq(dst); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { shrq(dst); }

  void sarptr(Register dst) { sarq(dst); }
  void sarptr(Register dst, int32_t src) { sarq(dst, src); }

  void subptr(Address dst, int32_t src) { subq(dst, src); }

  void subptr(Register dst, Address src) { subq(dst, src); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4-byte immediate value even if it fits into 8 bits
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
  void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }

  void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
  void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }

  void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
  void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
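
  // Illustrative sketch: bump a statistics counter only when the preceding
  // compare set ZF, without disturbing the flags (`counter` is hypothetical):
  //
  //   cond_inc32(Assembler::zero, ExternalAddress((address) &counter));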

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { orq(dst, src); }
  void orptr(Register dst, Register src) { orq(dst, src); }
  void orptr(Register dst, int32_t src) { orq(dst, src); }
  void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }

  void testptr(Register src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Address src2) { testq(src1, src2); }
  void testptr(Address src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { xorq(dst, src); }
  void xorptr(Register dst, Address src) { xorq(dst, src); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry, NOT
  // the address contained by entry, because that is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst, NOT
  // the address contained by dst, because that is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this * void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this * void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this * void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void push_ppx(Register src);
  void pop_ppx(Register dst);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }

  void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
    XMMRegister xmm_2, /* ymm6 */
    XMMRegister xmm_3, /* ymm7 */
    Register reg_a,    /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register reg_b,    /* ebx */ /* full cycle is 8 iterations */
    Register reg_c,    /* edi */
    Register reg_d,    /* esi */
    Register reg_e,    /* r8d */
    Register reg_f,    /* r9d */
    Register reg_g,    /* r10d */
    Register reg_h,    /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
  void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src) { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcasti128;
  void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
  void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
  void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation: lowers to 16-bit moves on targets supporting the
  // AVX512F feature and to 64-bit moves on targets supporting the AVX512BW feature.
  void kmov(Address dst, KRegister src);
  void kmov(KRegister dst, Address src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register dst, KRegister src);
  void kmov(KRegister dst, Register src);
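
  // Illustrative sketch: callers stay width-agnostic and let kmov pick the
  // widest encoding the CPU supports:
  //
  //   kmov(k1, rax);  // GPR -> mask register
  //   kmov(rax, k1);  // mask register -> GPR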
1163
1164 using Assembler::movddup;
1165 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1166
1167 using Assembler::vmovddup;
1168 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1169
1170 // AVX Unaligned forms
1171 void vmovdqu(Address dst, XMMRegister src);
1172 void vmovdqu(XMMRegister dst, Address src);
1173 void vmovdqu(XMMRegister dst, XMMRegister src);
1174 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1175 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1176 void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
1177 void vmovdqu(XMMRegister dst, Address src, int vector_len);
1178 void vmovdqu(Address dst, XMMRegister src, int vector_len);
1179
1180 // AVX Aligned forms
1181 using Assembler::vmovdqa;
1182 void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1183 void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1184
1185 // AVX512 Unaligned
1186 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1187 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1188 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1189
1190 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1191 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1192
1193 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1194 if (dst->encoding() != src->encoding() || mask != k0) {
1195 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1196 }
1197 }
1198 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1199 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1200 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1201
1202 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1203 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1204 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1205
1206 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1207 if (dst->encoding() != src->encoding() || mask != k0) {
1208 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1209 }
1210 }
1211 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1212 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1213 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1214
1215 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1216 if (dst->encoding() != src->encoding()) {
1217 Assembler::evmovdqul(dst, src, vector_len);
1218 }
1219 }
1220 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1221 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1222
1223 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1224 if (dst->encoding() != src->encoding() || mask != k0) {
1225 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1226 }
1227 }
1228 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1229 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1230 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1231
1232 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1233 if (dst->encoding() != src->encoding()) {
1234 Assembler::evmovdquq(dst, src, vector_len);
1235 }
1236 }
1237 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1238 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1239 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1240 void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1241
1242 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1243 if (dst->encoding() != src->encoding() || mask != k0) {
1244 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1245 }
1246 }
1247 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1248 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1249 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1250 void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1251
1252 using Assembler::movapd;
1253 void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1254
1255 // Move Aligned Double Quadword
1256 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1257 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1258 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1259
1260 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1261 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1262 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1263 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1264
1265 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1266 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1267 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1268
1269 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1270 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1271 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1272
1273 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1274 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1275 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1276
1277 // Carry-Less Multiplication Quadword
1278 void pclmulldq(XMMRegister dst, XMMRegister src) {
1279 // 0x00 - multiply lower 64 bits [0:63]
1280 Assembler::pclmulqdq(dst, src, 0x00);
1281 }
1282 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1283 // 0x11 - multiply upper 64 bits [64:127]
1284 Assembler::pclmulqdq(dst, src, 0x11);
1285 }
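
  // In pclmulqdq's imm8, bit 0 selects the quadword of the first operand and
  // bit 4 selects the quadword of the second (0 = low, 1 = high); 0x00 and
  // 0x11 therefore pair the two low and the two high quadwords, respectively.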
1286
1287 void pcmpeqb(XMMRegister dst, XMMRegister src);
1288 void pcmpeqw(XMMRegister dst, XMMRegister src);
1289
1290 void pcmpestri(XMMRegister dst, Address src, int imm8);
1291 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1292
1293 void pmovzxbw(XMMRegister dst, XMMRegister src);
1294 void pmovzxbw(XMMRegister dst, Address src);
1295
1296 void pmovmskb(Register dst, XMMRegister src);
1297
1298 void ptest(XMMRegister dst, XMMRegister src);
1299
1300 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1301 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1302 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1303
1304 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1305 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1306 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1307
1308 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1309 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1310 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1311
1312 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1313 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1314 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1315
1316 void evucomish(XMMRegister dst, XMMRegister src) { Assembler::evucomish(dst, src); }
1317 void evucomish(XMMRegister dst, Address src) { Assembler::evucomish(dst, src); }
1318 void evucomish(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1319
1320 void evucomxsh(XMMRegister dst, XMMRegister src) { Assembler::evucomxsh(dst, src); }
1321 void evucomxsh(XMMRegister dst, Address src) { Assembler::evucomxsh(dst, src); }
1322 void evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1323
1324 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1325 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1326 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1327
1328 void evucomxss(XMMRegister dst, XMMRegister src) { Assembler::evucomxss(dst, src); }
1329 void evucomxss(XMMRegister dst, Address src) { Assembler::evucomxss(dst, src); }
1330 void evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1331
1332 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1333 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1334 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1335
1336 void evucomxsd(XMMRegister dst, XMMRegister src) { Assembler::evucomxsd(dst, src); }
1337 void evucomxsd(XMMRegister dst, Address src) { Assembler::evucomxsd(dst, src); }
1338 void evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1339
1340 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1341 void xorpd(XMMRegister dst, XMMRegister src);
1342 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1343 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1344
1345 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1346 void xorps(XMMRegister dst, XMMRegister src);
1347 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1348 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1349
1350 // Shuffle Bytes
1351 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1352 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1353 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  // AVX 3-operand instructions
1355
1356 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1357 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1358 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1359
1360 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1361 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1362 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1363
1364 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1365 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1366
1367 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1368 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1369 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1370
1371 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1372 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1373
1374 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1375 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1376 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1377
1378 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1379 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1380 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1381
1382 using Assembler::vpbroadcastd;
1383 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1384
1385 using Assembler::vpbroadcastq;
1386 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1387
1388 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1389 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1390
1391 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1392 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1393 using Assembler::evpcmpeqd;
1394 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1395
1396 // Vector compares
1397 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1398 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1399 }
1400 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1401
1402 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1403 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1404 }
1405 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1406
1407 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1408 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1409 }
1410 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1411
1412 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1413 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1414 }
1415 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1416
1417 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1418
  // Emit a comparison instruction for the specified comparison predicate.
1420 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1421 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1422
1423 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1424 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1425
1426 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1427
1428 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1429 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1430
1431 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1432 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1433 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1434
1435 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1436
1437 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1438 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1439
1440 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1441 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1442
1443 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1444 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1445
1446 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1447 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1448
1449 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1450 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1451
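  // For the shift helpers below, 'is_varshift' selects the variable-shift
  // encoding (e.g. evpsllvw, a per-element count taken from 'src') instead of
  // the uniform encoding (e.g. evpsllw, one count applied to all elements).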
1452 using Assembler::evpsllw;
1453 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1454 if (!is_varshift) {
1455 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1456 } else {
1457 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1458 }
1459 }
1460 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1461 if (!is_varshift) {
1462 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1463 } else {
1464 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1465 }
1466 }
1467 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1468 if (!is_varshift) {
1469 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1470 } else {
1471 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1472 }
1473 }
1474 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1475 if (!is_varshift) {
1476 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1477 } else {
1478 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1479 }
1480 }
1481 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1482 if (!is_varshift) {
1483 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1484 } else {
1485 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1486 }
1487 }
1488
1489 using Assembler::evpsrlq;
1490 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1491 if (!is_varshift) {
1492 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1493 } else {
1494 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1495 }
1496 }
1497 using Assembler::evpsraw;
1498 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1499 if (!is_varshift) {
1500 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1501 } else {
1502 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1503 }
1504 }
1505 using Assembler::evpsrad;
1506 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1507 if (!is_varshift) {
1508 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1509 } else {
1510 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1511 }
1512 }
1513 using Assembler::evpsraq;
1514 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1515 if (!is_varshift) {
1516 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1517 } else {
1518 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1519 }
1520 }
1521
1522 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1523 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1524 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1525 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1526
1527 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1528 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1529 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1530 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1531
1532 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1533 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1534
1535 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1536 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1537
1538 void vptest(XMMRegister dst, XMMRegister src);
1539 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1540
1541 void punpcklbw(XMMRegister dst, XMMRegister src);
1542 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1543
1544 void pshufd(XMMRegister dst, Address src, int mode);
1545 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1546
1547 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1548 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1549
1550 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1551 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1552 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1553
1554 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1555 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1556 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1557
1558 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1559
1560 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1561 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1562 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1563
1564 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1565 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1566 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1567
1568 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1569 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1570 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1571
1572 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1573 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1574 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1575
1576 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1577 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1578 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1579
1580 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1581 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1582 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1583
1584 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1585 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1586
1587 // AVX Vector instructions
1588
1589 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1590 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1591 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1592
1593 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1594 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1595 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1596
1597 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1599 Assembler::vpxor(dst, nds, src, vector_len);
1600 else
1601 Assembler::vxorpd(dst, nds, src, vector_len);
1602 }
1603 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
1605 Assembler::vpxor(dst, nds, src, vector_len);
1606 else
1607 Assembler::vxorpd(dst, nds, src, vector_len);
1608 }
1609 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1610
  // Simple version for AVX2 256-bit vectors
1612 void vpxor(XMMRegister dst, XMMRegister src) {
1613 assert(UseAVX >= 2, "Should be at least AVX2");
1614 Assembler::vpxor(dst, dst, src, AVX_256bit);
1615 }
1616 void vpxor(XMMRegister dst, Address src) {
1617 assert(UseAVX >= 2, "Should be at least AVX2");
1618 Assembler::vpxor(dst, dst, src, AVX_256bit);
1619 }
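
  // A common zeroing idiom (illustrative, assuming AVX2):
  //   vpxor(xmm0, xmm0);  // emits vpxor xmm0, xmm0, xmm0 at AVX_256bit,
  //                       // clearing the whole YMM register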
1620
1621 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1622 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1623
1624 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1625 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1626 Assembler::vinserti32x4(dst, nds, src, imm8);
1627 } else if (UseAVX > 1) {
1628 // vinserti128 is available only in AVX2
1629 Assembler::vinserti128(dst, nds, src, imm8);
1630 } else {
1631 Assembler::vinsertf128(dst, nds, src, imm8);
1632 }
1633 }
1634
1635 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1636 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1637 Assembler::vinserti32x4(dst, nds, src, imm8);
1638 } else if (UseAVX > 1) {
1639 // vinserti128 is available only in AVX2
1640 Assembler::vinserti128(dst, nds, src, imm8);
1641 } else {
1642 Assembler::vinsertf128(dst, nds, src, imm8);
1643 }
1644 }
1645
1646 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1647 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1648 Assembler::vextracti32x4(dst, src, imm8);
1649 } else if (UseAVX > 1) {
1650 // vextracti128 is available only in AVX2
1651 Assembler::vextracti128(dst, src, imm8);
1652 } else {
1653 Assembler::vextractf128(dst, src, imm8);
1654 }
1655 }
1656
1657 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1658 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1659 Assembler::vextracti32x4(dst, src, imm8);
1660 } else if (UseAVX > 1) {
1661 // vextracti128 is available only in AVX2
1662 Assembler::vextracti128(dst, src, imm8);
1663 } else {
1664 Assembler::vextractf128(dst, src, imm8);
1665 }
1666 }
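
  // The wrappers above pick the encoding by CPU feature: AVX-512 targets
  // without VL use the EVEX-encoded 32x4 variants (VL's narrower EVEX forms
  // are absent there), AVX2 uses the native 128-bit integer forms, and plain
  // AVX falls back to vinsertf128/vextractf128, which move the same 128 bits.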
1667
  // 128-bit copy to/from the high 128 bits of 256-bit (YMM) vector registers
1669 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1670 vinserti128(dst, dst, src, 1);
1671 }
1672 void vinserti128_high(XMMRegister dst, Address src) {
1673 vinserti128(dst, dst, src, 1);
1674 }
1675 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1676 vextracti128(dst, src, 1);
1677 }
1678 void vextracti128_high(Address dst, XMMRegister src) {
1679 vextracti128(dst, src, 1);
1680 }
1681
1682 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1683 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1684 Assembler::vinsertf32x4(dst, dst, src, 1);
1685 } else {
1686 Assembler::vinsertf128(dst, dst, src, 1);
1687 }
1688 }
1689
1690 void vinsertf128_high(XMMRegister dst, Address src) {
1691 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1692 Assembler::vinsertf32x4(dst, dst, src, 1);
1693 } else {
1694 Assembler::vinsertf128(dst, dst, src, 1);
1695 }
1696 }
1697
1698 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1699 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1700 Assembler::vextractf32x4(dst, src, 1);
1701 } else {
1702 Assembler::vextractf128(dst, src, 1);
1703 }
1704 }
1705
1706 void vextractf128_high(Address dst, XMMRegister src) {
1707 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1708 Assembler::vextractf32x4(dst, src, 1);
1709 } else {
1710 Assembler::vextractf128(dst, src, 1);
1711 }
1712 }
1713
  // 256-bit copy to/from the high 256 bits of 512-bit (ZMM) vector registers
1715 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1716 Assembler::vinserti64x4(dst, dst, src, 1);
1717 }
1718 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1719 Assembler::vinsertf64x4(dst, dst, src, 1);
1720 }
1721 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1722 Assembler::vextracti64x4(dst, src, 1);
1723 }
1724 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1725 Assembler::vextractf64x4(dst, src, 1);
1726 }
1727 void vextractf64x4_high(Address dst, XMMRegister src) {
1728 Assembler::vextractf64x4(dst, src, 1);
1729 }
1730 void vinsertf64x4_high(XMMRegister dst, Address src) {
1731 Assembler::vinsertf64x4(dst, dst, src, 1);
1732 }
1733
  // 128-bit copy to/from the low 128 bits of 256-bit (YMM) vector registers
1735 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1736 vinserti128(dst, dst, src, 0);
1737 }
1738 void vinserti128_low(XMMRegister dst, Address src) {
1739 vinserti128(dst, dst, src, 0);
1740 }
1741 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1742 vextracti128(dst, src, 0);
1743 }
1744 void vextracti128_low(Address dst, XMMRegister src) {
1745 vextracti128(dst, src, 0);
1746 }
1747
1748 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1749 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1750 Assembler::vinsertf32x4(dst, dst, src, 0);
1751 } else {
1752 Assembler::vinsertf128(dst, dst, src, 0);
1753 }
1754 }
1755
1756 void vinsertf128_low(XMMRegister dst, Address src) {
1757 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1758 Assembler::vinsertf32x4(dst, dst, src, 0);
1759 } else {
1760 Assembler::vinsertf128(dst, dst, src, 0);
1761 }
1762 }
1763
1764 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1765 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1766 Assembler::vextractf32x4(dst, src, 0);
1767 } else {
1768 Assembler::vextractf128(dst, src, 0);
1769 }
1770 }
1771
1772 void vextractf128_low(Address dst, XMMRegister src) {
1773 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1774 Assembler::vextractf32x4(dst, src, 0);
1775 } else {
1776 Assembler::vextractf128(dst, src, 0);
1777 }
1778 }
1779
  // 256-bit copy to/from the low 256 bits of 512-bit (ZMM) vector registers
1781 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1782 Assembler::vinserti64x4(dst, dst, src, 0);
1783 }
1784 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1785 Assembler::vinsertf64x4(dst, dst, src, 0);
1786 }
1787 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1788 Assembler::vextracti64x4(dst, src, 0);
1789 }
1790 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1791 Assembler::vextractf64x4(dst, src, 0);
1792 }
1793 void vextractf64x4_low(Address dst, XMMRegister src) {
1794 Assembler::vextractf64x4(dst, src, 0);
1795 }
1796 void vinsertf64x4_low(XMMRegister dst, Address src) {
1797 Assembler::vinsertf64x4(dst, dst, src, 0);
1798 }
1799
1800 // Carry-Less Multiplication Quadword
1801 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1802 // 0x00 - multiply lower 64 bits [0:63]
1803 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1804 }
1805 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1806 // 0x11 - multiply upper 64 bits [64:127]
1807 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1808 }
1809 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1810 // 0x10 - multiply nds[0:63] and src[64:127]
1811 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1812 }
1813 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
1815 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1816 }
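
  // As with pclmulqdq, imm8 bit 0 selects the 'nds' quadword and bit 4 the
  // 'src' quadword; 0x10 and 0x01 thus give the two cross terms required for
  // a full 128x128-bit carry-less multiply (used, e.g., by the CRC folding
  // routines further below).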
1817
1818 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1819 // 0x00 - multiply lower 64 bits [0:63]
1820 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1821 }
1822 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1823 // 0x11 - multiply upper 64 bits [64:127]
1824 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1825 }
1826
1827 // AVX-512 mask operations.
1828 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1829 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1830 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1831 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1832 void kortest(uint masklen, KRegister src1, KRegister src2);
1833 void ktest(uint masklen, KRegister src1, KRegister src2);
1834
1835 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1836 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1837
1838 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1839 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1840
1841 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1842 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1843
1844 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1845 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1846
1847 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1848 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1849 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1850 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1851
1852 using Assembler::evpandq;
1853 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1854
1855 using Assembler::evpaddq;
1856 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1857
1858 using Assembler::evporq;
1859 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1860
1861 using Assembler::vpshufb;
1862 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1863
1864 using Assembler::vpor;
1865 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1866
1867 using Assembler::vpternlogq;
1868 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1869
1870 void cmov32( Condition cc, Register dst, Address src);
1871 void cmov32( Condition cc, Register dst, Register src);
1872
1873 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1874
1875 void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); }
1876 void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1877
1878 void movoop(Register dst, jobject obj);
1879 void movoop(Address dst, jobject obj, Register rscratch);
1880
1881 void mov_metadata(Register dst, Metadata* obj);
1882 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1883
1884 void mov64(Register dst, int64_t imm64);
1885 void mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format);
1886
1887 void movptr(Register dst, Register src);
1888 void movptr(Register dst, Address src);
1889 void movptr(Register dst, AddressLiteral src);
1890 void movptr(Register dst, ArrayAddress src);
1891 void movptr(Register dst, intptr_t src);
1892 void movptr(Address dst, Register src);
1893 void movptr(Address dst, int32_t imm);
1894 void movptr(Address dst, intptr_t src, Register rscratch);
1895 void movptr(ArrayAddress dst, Register src, Register rscratch);
1896
1897 void movptr(Register dst, RegisterOrConstant src) {
1898 if (src.is_constant()) movptr(dst, src.as_constant());
1899 else movptr(dst, src.as_register());
1900 }
1901
1903 // to avoid hiding movl
1904 void mov32(Register dst, AddressLiteral src);
1905 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1906
  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declarations.
1909 using Assembler::movdl;
1910 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1911
1912 using Assembler::movq;
1913 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1914
  // Can push a value or an effective address
1916 void pushptr(AddressLiteral src, Register rscratch);
1917
1918 void pushptr(Address src) { pushq(src); }
1919 void popptr(Address src) { popq(src); }
1920
1921 void pushoop(jobject obj, Register rscratch);
1922 void pushklass(Metadata* obj, Register rscratch);
1923
  // sign-extend an int ("l") value to a pointer-sized element as needed
1925 void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1926 void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1927
1929 public:
  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
1932 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1933
  // clear memory of a constant size 'cnt' (in qwords), starting at 'base'
1935 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1936
1937 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1938 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
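
  // Illustrative sketch (register choices hypothetical): to clear 'len' qwords
  // starting at 'obj' one might emit
  //   clear_mem(obj, len, r10, xmm1, /*is_large*/ false);
  // letting the helper choose between a short loop and a larger vectorized
  // sequence based on 'is_large' and the available CPU features.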
1939
1940 // Fill primitive arrays
1941 void generate_fill(BasicType t, bool aligned,
1942 Register to, Register value, Register count,
1943 Register rtmp, XMMRegister xtmp);
1944
1945 void encode_iso_array(Register src, Register dst, Register len,
1946 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1947 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1948
1949 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1950 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1951 Register y, Register y_idx, Register z,
1952 Register carry, Register product,
1953 Register idx, Register kdx);
1954 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1955 Register yz_idx, Register idx,
1956 Register carry, Register product, int offset);
1957 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1958 Register carry, Register carry2,
1959 Register idx, Register jdx,
1960 Register yz_idx1, Register yz_idx2,
1961 Register tmp, Register tmp3, Register tmp4);
1962 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1963 Register yz_idx, Register idx, Register jdx,
1964 Register carry, Register product,
1965 Register carry2);
1966 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
1967 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1968 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1969 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1970 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1971 Register tmp2);
1972 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1973 Register rdxReg, Register raxReg);
1974 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1975 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1976 Register tmp3, Register tmp4);
1977 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1978 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1979
1980 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1981 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1982 Register raxReg);
1983 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1984 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1985 Register raxReg);
1986 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1987 Register result, Register tmp1, Register tmp2,
1988 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1989
1990 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1991 void update_byte_crc32(Register crc, Register val, Register table);
1992 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1993
1994 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1995 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1996 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1997 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1998
1999 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on the naming convention:
2001 // Prefix w = register only used on a Westmere+ architecture
2002 // Prefix n = register only used on a Nehalem architecture
2003 void crc32c_ipl_alg4(Register in_out, uint32_t n,
2004 Register tmp1, Register tmp2, Register tmp3);
2005 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2006 Register in_out,
2007 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2008 XMMRegister w_xtmp2,
2009 Register tmp1,
2010 Register n_tmp2, Register n_tmp3);
2011 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2012 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2013 Register tmp1, Register tmp2,
2014 Register n_tmp3);
2015 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2016 Register in_out1, Register in_out2, Register in_out3,
2017 Register tmp1, Register tmp2, Register tmp3,
2018 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2019 Register tmp4, Register tmp5,
2020 Register n_tmp6);
2021 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2022 Register tmp1, Register tmp2, Register tmp3,
2023 Register tmp4, Register tmp5, Register tmp6,
2024 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2025 bool is_pclmulqdq_supported);
2026 // Fold 128-bit data chunk
2027 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2028 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
2029 // Fold 512-bit data chunk
2030 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2031 // Fold 8-bit data
2032 void fold_8bit_crc32(Register crc, Register table, Register tmp);
2033 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2034
2035 // Compress char[] array to byte[].
2036 void char_array_compress(Register src, Register dst, Register len,
2037 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2038 XMMRegister tmp4, Register tmp5, Register result,
2039 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2040
2041 // Inflate byte[] array to char[].
2042 void byte_array_inflate(Register src, Register dst, Register len,
2043 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2044
2045 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2046 Register length, Register temp, int vec_enc);
2047
2048 void fill64_masked(uint shift, Register dst, int disp,
2049 XMMRegister xmm, KRegister mask, Register length,
2050 Register temp, bool use64byteVector = false);
2051
2052 void fill32_masked(uint shift, Register dst, int disp,
2053 XMMRegister xmm, KRegister mask, Register length,
2054 Register temp);
2055
2056 void fill32(Address dst, XMMRegister xmm);
2057
2058 void fill32(Register dst, int disp, XMMRegister xmm);
2059
2060 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2061
  void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
2063
2064 void convert_f2i(Register dst, XMMRegister src);
2065 void convert_d2i(Register dst, XMMRegister src);
2066 void convert_f2l(Register dst, XMMRegister src);
2067 void convert_d2l(Register dst, XMMRegister src);
2068 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2069 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2070
2071 void cache_wb(Address line);
2072 void cache_wbsync(bool is_pre);
2073
2074 #ifdef COMPILER2_OR_JVMCI
2075 void generate_fill_avx3(BasicType type, Register to, Register value,
2076 Register count, Register rtmp, XMMRegister xtmp);
2077 #endif // COMPILER2_OR_JVMCI
2078
2079 void vallones(XMMRegister dst, int vector_len);
2080
2081 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2082
2083 void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2084 void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
2085
2086 void save_legacy_gprs();
2087 void restore_legacy_gprs();
2088 void load_aotrc_address(Register reg, address a);
2089 void setcc(Assembler::Condition comparison, Register dst);
2090 };
2091
2092 #endif // CPU_X86_MACROASSEMBLER_X86_HPP