1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "asm/register.hpp"
30 #include "code/vmreg.inline.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "utilities/macros.hpp"
33 #include "runtime/signature.hpp"
34 #include "runtime/vm_version.hpp"
35 #include "utilities/checkedCast.hpp"
36
37 class ciInlineKlass;
38
39 // MacroAssembler extends Assembler by frequently used macros.
40 //
41 // Instructions for which a 'better' code sequence exists depending
42 // on arguments should also go in here.
43
44 class MacroAssembler: public Assembler {
45 friend class LIR_Assembler;
46 friend class Runtime1; // as_Address()
47
48 public:
49 // Support for VM calls
50 //
51 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
52 // may customize this version by overriding it for its purposes (e.g., to save/restore
53 // additional registers when doing a VM call).
54
55 virtual void call_VM_leaf_base(
56 address entry_point, // the entry point
57 int number_of_arguments // the number of arguments to pop after the call
58 );
59
60 protected:
61 // This is the base routine called by the different versions of call_VM. The interpreter
62 // may customize this version by overriding it for its purposes (e.g., to save/restore
63 // additional registers when doing a VM call).
64 //
65 // call_VM_base returns the register which contains the thread upon return.
66 // If no last_java_sp is specified (noreg) than rsp will be used instead.
67 virtual void call_VM_base( // returns the register containing the thread upon return
68 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
69 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
70 address entry_point, // the entry point
71 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
72 bool check_exceptions // whether to check for pending exceptions after return
73 );
74
75 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
76
77 public:
78 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
79
80 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
81 // The implementation is only non-empty for the InterpreterMacroAssembler,
82 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
83 virtual void check_and_handle_popframe();
84 virtual void check_and_handle_earlyret();
85
86 Address as_Address(AddressLiteral adr);
87 Address as_Address(ArrayAddress adr, Register rscratch);
88
89 // Support for null-checks
90 //
91 // Generates code that causes a null OS exception if the content of reg is null.
92 // If the accessed location is M[reg + offset] and the offset is known, provide the
93 // offset. No explicit code generation is needed if the offset is within a certain
94 // range (0 <= offset <= page_size).
95
96 void null_check(Register reg, int offset = -1);
97 static bool needs_explicit_null_check(intptr_t offset);
98 static bool uses_implicit_null_check(void* address);
99
100 // markWord tests, kills markWord reg
101 void test_markword_is_inline_type(Register markword, Label& is_inline_type);
102
103 // inlineKlass queries, kills temp_reg
104 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type, bool can_be_null = true);
105
106 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
107 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
108 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
109 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
110
111 // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
112 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
113 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
114 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
115 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
116 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
117
118 // Check array klass layout helper for flat or null-free arrays...
119 void test_flat_array_layout(Register lh, Label& is_flat_array);
120 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
121
122 // Required platform-specific helpers for Label::patch_instructions.
123 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
124 void pd_patch_instruction(address branch, address target, const char* file, int line) {
125 unsigned char op = branch[0];
126 assert(op == 0xE8 /* call */ ||
127 op == 0xE9 /* jmp */ ||
128 op == 0xEB /* short jmp */ ||
129 (op & 0xF0) == 0x70 /* short jcc */ ||
130 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
131 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
132 (op == 0x8D) /* lea */,
133 "Invalid opcode at patch point");
134
135 if (op == 0xEB || (op & 0xF0) == 0x70) {
136 // short offset operators (jmp and jcc)
137 char* disp = (char*) &branch[1];
138 int imm8 = checked_cast<int>(target - (address) &disp[1]);
139 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
140 file == nullptr ? "<null>" : file, line);
141 *disp = (char)imm8;
142 } else {
143 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
144 int imm32 = checked_cast<int>(target - (address) &disp[1]);
145 *disp = imm32;
146 }
147 }
148
149 // The following 4 methods return the offset of the appropriate move instruction
150
151 // Support for fast byte/short loading with zero extension (depending on particular CPU)
152 int load_unsigned_byte(Register dst, Address src);
153 int load_unsigned_short(Register dst, Address src);
154
155 // Support for fast byte/short loading with sign extension (depending on particular CPU)
156 int load_signed_byte(Register dst, Address src);
157 int load_signed_short(Register dst, Address src);
158
159 // Support for sign-extension (hi:lo = extend_sign(lo))
160 void extend_sign(Register hi, Register lo);
161
162 // Load and store values by size and signed-ness
163 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
164 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
165
166 // Support for inc/dec with optimal instruction selection depending on value
167
168 void increment(Register reg, int value = 1) { incrementq(reg, value); }
169 void decrement(Register reg, int value = 1) { decrementq(reg, value); }
170 void increment(Address dst, int value = 1) { incrementq(dst, value); }
171 void decrement(Address dst, int value = 1) { decrementq(dst, value); }
172
173 void decrementl(Address dst, int value = 1);
174 void decrementl(Register reg, int value = 1);
175
176 void decrementq(Register reg, int value = 1);
177 void decrementq(Address dst, int value = 1);
178
179 void incrementl(Address dst, int value = 1);
180 void incrementl(Register reg, int value = 1);
181
182 void incrementq(Register reg, int value = 1);
183 void incrementq(Address dst, int value = 1);
184
185 void incrementl(AddressLiteral dst, Register rscratch = noreg);
186 void incrementl(ArrayAddress dst, Register rscratch);
187
188 void incrementq(AddressLiteral dst, Register rscratch = noreg);
189
190 void movhlf(XMMRegister dst, XMMRegister src, Register rscratch = noreg);
191
192 // Support optimal SSE move instructions.
193 void movflt(XMMRegister dst, XMMRegister src) {
194 if (dst-> encoding() == src->encoding()) return;
195 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
196 else { movss (dst, src); return; }
197 }
198 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
199 void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
200 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
201
202 // Move with zero extension
203 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
204
205 void movdbl(XMMRegister dst, XMMRegister src) {
206 if (dst-> encoding() == src->encoding()) return;
207 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
208 else { movsd (dst, src); return; }
209 }
210
211 void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
212
213 void movdbl(XMMRegister dst, Address src) {
214 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
215 else { movlpd(dst, src); return; }
216 }
217 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
218
219 void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
220 // Use separate tmp XMM register because caller may
221 // requires src XMM register to be unchanged (as in x86.ad).
222 vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
223 movdl(dst, tmp);
224 movswl(dst, dst);
225 }
226
227 void flt16_to_flt(XMMRegister dst, Register src) {
228 movdl(dst, src);
229 vcvtph2ps(dst, dst, Assembler::AVX_128bit);
230 }
231
232 // Alignment
233 void align32();
234 void align64();
235 void align(uint modulus);
236 void align(uint modulus, uint target);
237
238 void post_call_nop();
239
240 // Stack frame creation/removal
241 void enter();
242 void leave();
243
244 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information).
245 // The pointer will be loaded into the thread register. This is a slow version that does native call.
246 // Normally, JavaThread pointer is available in r15_thread, use that where possible.
247 void get_thread_slow(Register thread);
248
249 // Support for argument shuffling
250
251 // bias in bytes
252 void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
253 void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
254 void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
255 void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
256 void move_ptr(VMRegPair src, VMRegPair dst);
257 void object_move(OopMap* map,
258 int oop_handle_offset,
259 int framesize_in_slots,
260 VMRegPair src,
261 VMRegPair dst,
262 bool is_receiver,
263 int* receiver_offset);
264
265 // Support for VM calls
266 //
267 // It is imperative that all calls into the VM are handled via the call_VM macros.
268 // They make sure that the stack linkage is setup correctly. call_VM's correspond
269 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
270
271
272 void call_VM(Register oop_result,
273 address entry_point,
274 bool check_exceptions = true);
275 void call_VM(Register oop_result,
276 address entry_point,
277 Register arg_1,
278 bool check_exceptions = true);
279 void call_VM(Register oop_result,
280 address entry_point,
281 Register arg_1, Register arg_2,
282 bool check_exceptions = true);
283 void call_VM(Register oop_result,
284 address entry_point,
285 Register arg_1, Register arg_2, Register arg_3,
286 bool check_exceptions = true);
287
288 // Overloadings with last_Java_sp
289 void call_VM(Register oop_result,
290 Register last_java_sp,
291 address entry_point,
292 int number_of_arguments = 0,
293 bool check_exceptions = true);
294 void call_VM(Register oop_result,
295 Register last_java_sp,
296 address entry_point,
297 Register arg_1, bool
298 check_exceptions = true);
299 void call_VM(Register oop_result,
300 Register last_java_sp,
301 address entry_point,
302 Register arg_1, Register arg_2,
303 bool check_exceptions = true);
304 void call_VM(Register oop_result,
305 Register last_java_sp,
306 address entry_point,
307 Register arg_1, Register arg_2, Register arg_3,
308 bool check_exceptions = true);
309
310 void get_vm_result_oop(Register oop_result);
311 void get_vm_result_metadata(Register metadata_result);
312
313 // These always tightly bind to MacroAssembler::call_VM_base
314 // bypassing the virtual implementation
315 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
316 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
317 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
318 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
319 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
320
321 void call_VM_leaf0(address entry_point);
322 void call_VM_leaf(address entry_point,
323 int number_of_arguments = 0);
324 void call_VM_leaf(address entry_point,
325 Register arg_1);
326 void call_VM_leaf(address entry_point,
327 Register arg_1, Register arg_2);
328 void call_VM_leaf(address entry_point,
329 Register arg_1, Register arg_2, Register arg_3);
330
331 void call_VM_leaf(address entry_point,
332 Register arg_1, Register arg_2, Register arg_3, Register arg_4);
333
334 // These always tightly bind to MacroAssembler::call_VM_leaf_base
335 // bypassing the virtual implementation
336 void super_call_VM_leaf(address entry_point);
337 void super_call_VM_leaf(address entry_point, Register arg_1);
338 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
339 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
340 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
341
342 void set_last_Java_frame(Register last_java_sp,
343 Register last_java_fp,
344 address last_java_pc,
345 Register rscratch);
346
347 void set_last_Java_frame(Register last_java_sp,
348 Register last_java_fp,
349 Label &last_java_pc,
350 Register scratch);
351
352 void reset_last_Java_frame(bool clear_fp);
353
354 // jobjects
355 void clear_jobject_tag(Register possibly_non_local);
356 void resolve_jobject(Register value, Register tmp);
357 void resolve_global_jobject(Register value, Register tmp);
358
359 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
360 void c2bool(Register x);
361
362 // C++ bool manipulation
363
364 void movbool(Register dst, Address src);
365 void movbool(Address dst, bool boolconst);
366 void movbool(Address dst, Register src);
367 void testbool(Register dst);
368
369 void resolve_oop_handle(Register result, Register tmp);
370 void resolve_weak_handle(Register result, Register tmp);
371 void load_mirror(Register mirror, Register method, Register tmp);
372 void load_method_holder_cld(Register rresult, Register rmethod);
373
374 void load_method_holder(Register holder, Register method);
375
376 // oop manipulations
377
378 // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
379 void load_metadata(Register dst, Register src);
380 void load_narrow_klass_compact(Register dst, Register src);
381 void load_klass(Register dst, Register src, Register tmp);
382 void store_klass(Register dst, Register src, Register tmp);
383
384 // Compares the narrow Klass pointer of an object to a given narrow Klass.
385 void cmp_klass(Register klass, Register obj, Register tmp);
386
387 // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
388 // Uses tmp1 and tmp2 as temporary registers.
389 void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
390
391 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
392 Register tmp1);
393 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
394 Register tmp1, Register tmp2, Register tmp3);
395
396 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
397
398 // inline type data payload offsets...
399 void payload_offset(Register inline_klass, Register offset);
400 void payload_addr(Register oop, Register data, Register inline_klass);
401 // get data payload ptr a flat value array at index, kills rcx and index
402 void data_for_value_array_index(Register array, Register array_klass,
403 Register index, Register data);
404
405 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
406 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
407 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
408 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
409
410 // Used for storing null. All other oop constants should be
411 // stored using routines that take a jobject.
412 void store_heap_oop_null(Address dst);
413
414 void load_prototype_header(Register dst, Register src, Register tmp);
415
416 void store_klass_gap(Register dst, Register src);
417
418 // This dummy is to prevent a call to store_heap_oop from
419 // converting a zero (like null) into a Register by giving
420 // the compiler two choices it can't resolve
421
422 void store_heap_oop(Address dst, void* dummy);
423
424 void encode_heap_oop(Register r);
425 void decode_heap_oop(Register r);
426 void encode_heap_oop_not_null(Register r);
427 void decode_heap_oop_not_null(Register r);
428 void encode_heap_oop_not_null(Register dst, Register src);
429 void decode_heap_oop_not_null(Register dst, Register src);
430
431 void set_narrow_oop(Register dst, jobject obj);
432 void set_narrow_oop(Address dst, jobject obj);
433 void cmp_narrow_oop(Register dst, jobject obj);
434 void cmp_narrow_oop(Address dst, jobject obj);
435
436 void encode_klass_not_null(Register r, Register tmp);
437 void decode_klass_not_null(Register r, Register tmp);
438 void encode_and_move_klass_not_null(Register dst, Register src);
439 void decode_and_move_klass_not_null(Register dst, Register src);
440 void set_narrow_klass(Register dst, Klass* k);
441 void set_narrow_klass(Address dst, Klass* k);
442 void cmp_narrow_klass(Register dst, Klass* k);
443 void cmp_narrow_klass(Address dst, Klass* k);
444
445 // if heap base register is used - reinit it with the correct value
446 void reinit_heapbase();
447
448 DEBUG_ONLY(void verify_heapbase(const char* msg);)
449
450 // Int division/remainder for Java
451 // (as idivl, but checks for special case as described in JVM spec.)
452 // returns idivl instruction offset for implicit exception handling
453 int corrected_idivl(Register reg);
454
455 // Long division/remainder for Java
456 // (as idivq, but checks for special case as described in JVM spec.)
457 // returns idivq instruction offset for implicit exception handling
458 int corrected_idivq(Register reg);
459
460 void int3();
461
462 // Long operation macros for a 32bit cpu
463 // Long negation for Java
464 void lneg(Register hi, Register lo);
465
466 // Long multiplication for Java
467 // (destroys contents of eax, ebx, ecx and edx)
468 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
469
470 // Long shifts for Java
471 // (semantics as described in JVM spec.)
472 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
473 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
474
475 // Long compare for Java
476 // (semantics as described in JVM spec.)
477 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
478
479
480 // misc
481
482 // Sign extension
483 void sign_extend_short(Register reg);
484 void sign_extend_byte(Register reg);
485
486 // Clean up a subword typed value to the representation in compliance with JVMS ยง2.3
487 void narrow_subword_type(Register reg, BasicType bt);
488
489 // Division by power of 2, rounding towards 0
490 void division_with_shift(Register reg, int shift_value);
491
492 // dst = c = a * b + c
493 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
494 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
495
496 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
497 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
498 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
499 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
500
501
502 // same as fcmp2int, but using SSE2
503 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
504 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
505
506 void push_IU_state();
507 void pop_IU_state();
508
509 void push_FPU_state();
510 void pop_FPU_state();
511
512 void push_CPU_state();
513 void pop_CPU_state();
514
515 void push_cont_fastpath();
516 void pop_cont_fastpath();
517
518 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)
519
520 // Round up to a power of two
521 void round_to(Register reg, int modulus);
522
523 private:
524 // General purpose and XMM registers potentially clobbered by native code; there
525 // is no need for FPU or AVX opmask related methods because C1/interpreter
526 // - we save/restore FPU state as a whole always
527 // - do not care about AVX-512 opmask
528 static RegSet call_clobbered_gp_registers();
529 static XMMRegSet call_clobbered_xmm_registers();
530
531 void push_set(XMMRegSet set, int offset);
532 void pop_set(XMMRegSet set, int offset);
533
534 public:
535 void push_set(RegSet set, int offset = -1);
536 void pop_set(RegSet set, int offset = -1);
537
538 // Push and pop everything that might be clobbered by a native
539 // runtime call.
540 // Only save the lower 64 bits of each vector register.
541 // Additional registers can be excluded in a passed RegSet.
542 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
543 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
544
545 void push_call_clobbered_registers(bool save_fpu = true) {
546 push_call_clobbered_registers_except(RegSet(), save_fpu);
547 }
548 void pop_call_clobbered_registers(bool restore_fpu = true) {
549 pop_call_clobbered_registers_except(RegSet(), restore_fpu);
550 }
551
552 // allocation
553
554 // Object / value buffer allocation...
555 // Allocate instance of klass, assumes klass initialized by caller
556 // new_obj prefers to be rax
557 // Kills t1 and t2, perserves klass, return allocation in new_obj (rsi on LP64)
558 void allocate_instance(Register klass, Register new_obj,
559 Register t1, Register t2,
560 bool clear_fields, Label& alloc_failed);
561
562 void tlab_allocate(
563 Register obj, // result: pointer to object after successful allocation
564 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
565 int con_size_in_bytes, // object size in bytes if known at compile time
566 Register t1, // temp register
567 Register t2, // temp register
568 Label& slow_case // continuation point if fast allocation fails
569 );
570 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
571
572 void inline_layout_info(Register klass, Register index, Register layout_info);
573
574 void population_count(Register dst, Register src, Register scratch1, Register scratch2);
575
576 // interface method calling
577 void lookup_interface_method(Register recv_klass,
578 Register intf_klass,
579 RegisterOrConstant itable_index,
580 Register method_result,
581 Register scan_temp,
582 Label& no_such_interface,
583 bool return_method = true);
584
585 void lookup_interface_method_stub(Register recv_klass,
586 Register holder_klass,
587 Register resolved_klass,
588 Register method_result,
589 Register scan_temp,
590 Register temp_reg2,
591 Register receiver,
592 int itable_index,
593 Label& L_no_such_interface);
594
595 // virtual method calling
596 void lookup_virtual_method(Register recv_klass,
597 RegisterOrConstant vtable_index,
598 Register method_result);
599
600 // Test sub_klass against super_klass, with fast and slow paths.
601
602 // The fast path produces a tri-state answer: yes / no / maybe-slow.
603 // One of the three labels can be null, meaning take the fall-through.
604 // If super_check_offset is -1, the value is loaded up from super_klass.
605 // No registers are killed, except temp_reg.
606 void check_klass_subtype_fast_path(Register sub_klass,
607 Register super_klass,
608 Register temp_reg,
609 Label* L_success,
610 Label* L_failure,
611 Label* L_slow_path,
612 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
613
614 // The rest of the type check; must be wired to a corresponding fast path.
615 // It does not repeat the fast path logic, so don't use it standalone.
616 // The temp_reg and temp2_reg can be noreg, if no temps are available.
617 // Updates the sub's secondary super cache as necessary.
618 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
619 void check_klass_subtype_slow_path(Register sub_klass,
620 Register super_klass,
621 Register temp_reg,
622 Register temp2_reg,
623 Label* L_success,
624 Label* L_failure,
625 bool set_cond_codes = false);
626
627 // The 64-bit version, which may do a hashed subclass lookup.
628 void check_klass_subtype_slow_path(Register sub_klass,
629 Register super_klass,
630 Register temp_reg,
631 Register temp2_reg,
632 Register temp3_reg,
633 Register temp4_reg,
634 Label* L_success,
635 Label* L_failure);
636
637 // Three parts of a hashed subclass lookup: a simple linear search,
638 // a table lookup, and a fallback that does linear probing in the
639 // event of a hash collision.
640 void check_klass_subtype_slow_path_linear(Register sub_klass,
641 Register super_klass,
642 Register temp_reg,
643 Register temp2_reg,
644 Label* L_success,
645 Label* L_failure,
646 bool set_cond_codes = false);
647 void check_klass_subtype_slow_path_table(Register sub_klass,
648 Register super_klass,
649 Register temp_reg,
650 Register temp2_reg,
651 Register temp3_reg,
652 Register result_reg,
653 Label* L_success,
654 Label* L_failure);
655 void hashed_check_klass_subtype_slow_path(Register sub_klass,
656 Register super_klass,
657 Register temp_reg,
658 Label* L_success,
659 Label* L_failure);
660
661 // As above, but with a constant super_klass.
662 // The result is in Register result, not the condition codes.
663 void lookup_secondary_supers_table_const(Register sub_klass,
664 Register super_klass,
665 Register temp1,
666 Register temp2,
667 Register temp3,
668 Register temp4,
669 Register result,
670 u1 super_klass_slot);
671
672 using Assembler::salq;
673 void salq(Register dest, Register count);
674 using Assembler::rorq;
675 void rorq(Register dest, Register count);
676 void lookup_secondary_supers_table_var(Register sub_klass,
677 Register super_klass,
678 Register temp1,
679 Register temp2,
680 Register temp3,
681 Register temp4,
682 Register result);
683
684 void lookup_secondary_supers_table_slow_path(Register r_super_klass,
685 Register r_array_base,
686 Register r_array_index,
687 Register r_bitmap,
688 Register temp1,
689 Register temp2,
690 Label* L_success,
691 Label* L_failure = nullptr);
692
693 void verify_secondary_supers_table(Register r_sub_klass,
694 Register r_super_klass,
695 Register expected,
696 Register temp1,
697 Register temp2,
698 Register temp3);
699
700 void repne_scanq(Register addr, Register value, Register count, Register limit,
701 Label* L_success,
702 Label* L_failure = nullptr);
703
704 // If r is valid, return r.
705 // If r is invalid, remove a register r2 from available_regs, add r2
706 // to regs_to_push, then return r2.
707 Register allocate_if_noreg(const Register r,
708 RegSetIterator<Register> &available_regs,
709 RegSet ®s_to_push);
710
711 // Simplified, combined version, good for typical uses.
712 // Falls through on failure.
713 void check_klass_subtype(Register sub_klass,
714 Register super_klass,
715 Register temp_reg,
716 Label& L_success);
717
718 void clinit_barrier(Register klass,
719 Label* L_fast_path = nullptr,
720 Label* L_slow_path = nullptr);
721
722 // method handles (JSR 292)
723 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
724
725 void profile_receiver_type(Register recv, Register mdp, int mdp_offset);
726
727 // Debugging
728
729 // only if +VerifyOops
730 void _verify_oop(Register reg, const char* s, const char* file, int line);
731 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
732
733 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
734 if (VerifyOops) {
735 _verify_oop(reg, s, file, line);
736 }
737 }
738 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
739 if (VerifyOops) {
740 _verify_oop_addr(reg, s, file, line);
741 }
742 }
743
744 // TODO: verify method and klass metadata (compare against vptr?)
745 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
746 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
747
748 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
749 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
750 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
751 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
752 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
753
754 // Verify or restore cpu control state after JNI call
755 void restore_cpu_control_state_after_jni(Register rscratch);
756
757 // prints msg, dumps registers and stops execution
758 void stop(const char* msg);
759
760 // prints msg and continues
761 void warn(const char* msg);
762
763 // dumps registers and other state
764 void print_state();
765
766 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
767 static void debug64(char* msg, int64_t pc, int64_t regs[]);
768 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
769 static void print_state64(int64_t pc, int64_t regs[]);
770
771 void os_breakpoint();
772
773 void untested() { stop("untested"); }
774
775 void unimplemented(const char* what = "");
776
777 void should_not_reach_here() { stop("should not reach here"); }
778
779 void print_CPU_state();
780
781 // Stack overflow checking
782 void bang_stack_with_offset(int offset) {
783 // stack grows down, caller passes positive offset
784 assert(offset > 0, "must bang with negative offset");
785 movl(Address(rsp, (-offset)), rax);
786 }
787
788 // Writes to stack successive pages until offset reached to check for
789 // stack overflow + shadow pages. Also, clobbers tmp
790 void bang_stack_size(Register size, Register tmp);
791
792 // Check for reserved stack access in method being exited (for JIT)
793 void reserved_stack_check();
794
795 void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);
796
797 void verify_tlab();
798
799 static Condition negate_condition(Condition cond);
800
801 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
802 // operands. In general the names are modified to avoid hiding the instruction in Assembler
803 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
804 // here in MacroAssembler. The major exception to this rule is call
805
806 // Arithmetics
807
808
809 void addptr(Address dst, int32_t src) { addq(dst, src); }
810 void addptr(Address dst, Register src);
811
812 void addptr(Register dst, Address src) { addq(dst, src); }
813 void addptr(Register dst, int32_t src);
814 void addptr(Register dst, Register src);
815 void addptr(Register dst, RegisterOrConstant src) {
816 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
817 else addptr(dst, src.as_register());
818 }
819
820 void andptr(Register dst, int32_t src);
821 void andptr(Register src1, Register src2) { andq(src1, src2); }
822 void andptr(Register dst, Address src) { andq(dst, src); }
823
824 using Assembler::andq;
825 void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
826
827 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
828
829 // renamed to drag out the casting of address to int32_t/intptr_t
830 void cmp32(Register src1, int32_t imm);
831
832 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
833 // compare reg - mem, or reg - &mem
834 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
835
836 void cmp32(Register src1, Address src2);
837
838 void cmpoop(Register src1, Register src2);
839 void cmpoop(Register src1, Address src2);
840 void cmpoop(Register dst, jobject obj, Register rscratch);
841
842 // NOTE src2 must be the lval. This is NOT an mem-mem compare
843 void cmpptr(Address src1, AddressLiteral src2, Register rscratch);
844
845 void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);
846
847 void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
848 void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }
849
850 void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
851 void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }
852
853 // cmp64 to avoild hiding cmpq
854 void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);
855
856 void cmpxchgptr(Register reg, Address adr);
857
858 void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);
859
860 void imulptr(Register dst, Register src) { imulq(dst, src); }
861 void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }
862
863
864 void negptr(Register dst) { negq(dst); }
865
866 void notptr(Register dst) { notq(dst); }
867
868 void shlptr(Register dst, int32_t shift);
869 void shlptr(Register dst) { shlq(dst); }
870
871 void shrptr(Register dst, int32_t shift);
872 void shrptr(Register dst) { shrq(dst); }
873
874 void sarptr(Register dst) { sarq(dst); }
875 void sarptr(Register dst, int32_t src) { sarq(dst, src); }
876
877 void subptr(Address dst, int32_t src) { subq(dst, src); }
878
879 void subptr(Register dst, Address src) { subq(dst, src); }
880 void subptr(Register dst, int32_t src);
881 // Force generation of a 4 byte immediate value even if it fits into 8bit
882 void subptr_imm32(Register dst, int32_t src);
883 void subptr(Register dst, Register src);
884 void subptr(Register dst, RegisterOrConstant src) {
885 if (src.is_constant()) subptr(dst, (int) src.as_constant());
886 else subptr(dst, src.as_register());
887 }
888
889 void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
890 void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }
891
892 void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
893 void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }
894
895 void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
896
897
898
899 // Helper functions for statistics gathering.
900 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
901 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
902 // Unconditional atomic increment.
903 void atomic_incl(Address counter_addr);
904 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
905 void atomic_incq(Address counter_addr);
906 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
907 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
908 void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }
909
910 using Assembler::lea;
911 void lea(Register dst, AddressLiteral adr);
912 void lea(Address dst, AddressLiteral adr, Register rscratch);
913
914 void leal32(Register dst, Address src) { leal(dst, src); }
915
916 // Import other testl() methods from the parent class or else
917 // they will be hidden by the following overriding declaration.
918 using Assembler::testl;
919 void testl(Address dst, int32_t imm32);
920 void testl(Register dst, int32_t imm32);
921 void testl(Register dst, AddressLiteral src); // requires reachable address
922 using Assembler::testq;
923 void testq(Address dst, int32_t imm32);
924 void testq(Register dst, int32_t imm32);
925
926 void orptr(Register dst, Address src) { orq(dst, src); }
927 void orptr(Register dst, Register src) { orq(dst, src); }
928 void orptr(Register dst, int32_t src) { orq(dst, src); }
929 void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }
930
931 void testptr(Register src, int32_t imm32) { testq(src, imm32); }
932 void testptr(Register src1, Address src2) { testq(src1, src2); }
933 void testptr(Address src, int32_t imm32) { testq(src, imm32); }
934 void testptr(Register src1, Register src2);
935
936 void xorptr(Register dst, Register src) { xorq(dst, src); }
937 void xorptr(Register dst, Address src) { xorq(dst, src); }
938
939 // Calls
940
941 void call(Label& L, relocInfo::relocType rtype);
942 void call(Register entry);
943 void call(Address addr) { Assembler::call(addr); }
944
945 // NOTE: this call transfers to the effective address of entry NOT
946 // the address contained by entry. This is because this is more natural
947 // for jumps/calls.
948 void call(AddressLiteral entry, Register rscratch = rax);
949
950 // Emit the CompiledIC call idiom
951 void ic_call(address entry, jint method_index = 0);
952 static int ic_check_size();
953 int ic_check(int end_alignment);
954
955 void emit_static_call_stub();
956
957 // Jumps
958
959 // NOTE: these jumps transfer to the effective address of dst NOT
960 // the address contained by dst. This is because this is more natural
961 // for jumps/calls.
962 void jump(AddressLiteral dst, Register rscratch = noreg);
963
964 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
965
966 // 32bit can do a case table jump in one instruction but we no longer allow the base
967 // to be installed in the Address class. This jump will transfer to the address
968 // contained in the location described by entry (not the address of entry)
969 void jump(ArrayAddress entry, Register rscratch);
970
971 // Adding more natural conditional jump instructions
972 void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
973 void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
974 void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
975 void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
976 void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
977 void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
978 void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
979 void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
980 void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
981 void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
982 void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
983 void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
984 void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
985 void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
986 void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
987 void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
988 void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
989 void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
990 void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
991 void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
992 void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
993 void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
994 void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
995 void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
996 void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
997 void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
998 void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
999 void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
1000 void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
1001 void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
1002 // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1003 // * No condition for this * void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
1004
1005 // Short versions of the above
1006 void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
1007 void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
1008 void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
1009 void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
1010 void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
1011 void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
1012 void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
1013 void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
1014 void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
1015 void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
1016 void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
1017 void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1018 void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
1019 void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
1020 void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
1021 void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
1022 void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
1023 void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
1024 void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
1025 void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
1026 void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1027 void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
1028 void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
1029 void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
1030 void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
1031 void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
1032 void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
1033 void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
1034 void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
1035 void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
1036 // * No condition for this * void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
1037 // * No condition for this * void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }
1038
1039 // Floating
1040
1041 void push_f(XMMRegister r);
1042 void pop_f(XMMRegister r);
1043 void push_d(XMMRegister r);
1044 void pop_d(XMMRegister r);
1045
1046 void push_ppx(Register src);
1047 void pop_ppx(Register dst);
1048
1049 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
1050 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
1051 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1052
1053 void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }
1054
1055 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
1056 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
1057 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1058
1059 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
1060 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
1061 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1062
1063 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
1064 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
1065 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1066
1067 void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }
1068
1069 void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
1070 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
1071 void ldmxcsr(AddressLiteral src, Register rscratch = noreg);
1072
1073 private:
1074 void sha256_AVX2_one_round_compute(
1075 Register reg_old_h,
1076 Register reg_a,
1077 Register reg_b,
1078 Register reg_c,
1079 Register reg_d,
1080 Register reg_e,
1081 Register reg_f,
1082 Register reg_g,
1083 Register reg_h,
1084 int iter);
1085 void sha256_AVX2_four_rounds_compute_first(int start);
1086 void sha256_AVX2_four_rounds_compute_last(int start);
1087 void sha256_AVX2_one_round_and_sched(
1088 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
1089 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
1090 XMMRegister xmm_2, /* ymm6 */
1091 XMMRegister xmm_3, /* ymm7 */
1092 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
1093 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
1094 Register reg_c, /* edi */
1095 Register reg_d, /* esi */
1096 Register reg_e, /* r8d */
1097 Register reg_f, /* r9d */
1098 Register reg_g, /* r10d */
1099 Register reg_h, /* r11d */
1100 int iter);
1101
1102 void addm(int disp, Register r1, Register r2);
1103
1104 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
1105 Register e, Register f, Register g, Register h, int iteration);
1106
1107 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1108 Register a, Register b, Register c, Register d, Register e, Register f,
1109 Register g, Register h, int iteration);
1110
1111 void addmq(int disp, Register r1, Register r2);
1112 public:
1113 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1114 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1115 Register buf, Register state, Register ofs, Register limit, Register rsp,
1116 bool multi_block, XMMRegister shuf_mask);
1117 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1118 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1119 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
1120 XMMRegister shuf_mask);
1121 void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
1122
1123 void fast_md5(Register buf, Address state, Address ofs, Address limit,
1124 bool multi_block);
1125
1126 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
1127 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
1128 Register buf, Register state, Register ofs, Register limit, Register rsp,
1129 bool multi_block);
1130
1131 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1132 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1133 Register buf, Register state, Register ofs, Register limit, Register rsp,
1134 bool multi_block, XMMRegister shuf_mask);
1135
1136 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1137 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1138 Register rax, Register rcx, Register rdx, Register tmp);
1139
1140 private:
1141
1142 // these are private because users should be doing movflt/movdbl
1143
1144 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1145 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1146 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1147 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1148
1149 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1150 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1151
1152 public:
1153
1154 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1155 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1156 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1157
1158 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1159 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1160 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1161
1162 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1163 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1164 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1165
1166 using Assembler::vbroadcasti128;
1167 void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1168
1169 using Assembler::vbroadcastsd;
1170 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1171
1172 using Assembler::vbroadcastss;
1173 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1174
1175 // Vector float blend
1176 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1177 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
1178
1179 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1180 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1181 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1182
1183 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1184 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1185 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1186
1187 // Move Unaligned Double Quadword
1188 void movdqu(Address dst, XMMRegister src);
1189 void movdqu(XMMRegister dst, XMMRegister src);
1190 void movdqu(XMMRegister dst, Address src);
1191 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1192
1193 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1194 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1195 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1196 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1197 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1198 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1199
1200 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1201 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1202 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1203 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1204 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1205 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);
1206
1207 // Safe move operation, lowers down to 16bit moves for targets supporting
1208 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
1209 void kmov(Address dst, KRegister src);
1210 void kmov(KRegister dst, Address src);
1211 void kmov(KRegister dst, KRegister src);
1212 void kmov(Register dst, KRegister src);
1213 void kmov(KRegister dst, Register src);
1214
1215 using Assembler::movddup;
1216 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1217
1218 using Assembler::vmovddup;
1219 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1220
1221 // AVX Unaligned forms
1222 void vmovdqu(Address dst, XMMRegister src);
1223 void vmovdqu(XMMRegister dst, Address src);
1224 void vmovdqu(XMMRegister dst, XMMRegister src);
1225 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1226 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1227 void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
1228 void vmovdqu(XMMRegister dst, Address src, int vector_len);
1229 void vmovdqu(Address dst, XMMRegister src, int vector_len);
1230
1231 // AVX Aligned forms
1232 using Assembler::vmovdqa;
1233 void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1234 void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1235
1236 // AVX512 Unaligned
1237 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
1238 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
1239 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);
1240
1241 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1242 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
1243
1244 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1245 if (dst->encoding() != src->encoding() || mask != k0) {
1246 Assembler::evmovdqub(dst, mask, src, merge, vector_len);
1247 }
1248 }
1249 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1250 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1251 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1252
1253 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1254 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1255 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
1256
1257 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1258 if (dst->encoding() != src->encoding() || mask != k0) {
1259 Assembler::evmovdquw(dst, mask, src, merge, vector_len);
1260 }
1261 }
1262 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1263 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1264 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1265
1266 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1267 if (dst->encoding() != src->encoding()) {
1268 Assembler::evmovdqul(dst, src, vector_len);
1269 }
1270 }
1271 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1272 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1273
1274 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1275 if (dst->encoding() != src->encoding() || mask != k0) {
1276 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1277 }
1278 }
1279 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1280 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1281 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1282
1283 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1284 if (dst->encoding() != src->encoding()) {
1285 Assembler::evmovdquq(dst, src, vector_len);
1286 }
1287 }
1288 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1289 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1290 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1291 void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1292
1293 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1294 if (dst->encoding() != src->encoding() || mask != k0) {
1295 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1296 }
1297 }
1298 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1299 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1300 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1301 void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1302
1303 using Assembler::movapd;
1304 void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1305
1306 // Move Aligned Double Quadword
1307 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1308 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1309 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1310
1311 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1312 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1313 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1314 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1315
1316 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1317 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1318 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1319
1320 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1321 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1322 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1323
1324 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1325 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1326 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1327
1328 // Carry-Less Multiplication Quadword
1329 void pclmulldq(XMMRegister dst, XMMRegister src) {
1330 // 0x00 - multiply lower 64 bits [0:63]
1331 Assembler::pclmulqdq(dst, src, 0x00);
1332 }
1333 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1334 // 0x11 - multiply upper 64 bits [64:127]
1335 Assembler::pclmulqdq(dst, src, 0x11);
1336 }
1337
1338 void pcmpeqb(XMMRegister dst, XMMRegister src);
1339 void pcmpeqw(XMMRegister dst, XMMRegister src);
1340
1341 void pcmpestri(XMMRegister dst, Address src, int imm8);
1342 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1343
1344 void pmovzxbw(XMMRegister dst, XMMRegister src);
1345 void pmovzxbw(XMMRegister dst, Address src);
1346
1347 void pmovmskb(Register dst, XMMRegister src);
1348
1349 void ptest(XMMRegister dst, XMMRegister src);
1350
1351 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1352 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1353 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);
1354
1355 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1356 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1357 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1358
1359 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1360 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1361 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1362
1363 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1364 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1365 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1366
1367 void evucomish(XMMRegister dst, XMMRegister src) { Assembler::evucomish(dst, src); }
1368 void evucomish(XMMRegister dst, Address src) { Assembler::evucomish(dst, src); }
1369 void evucomish(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1370
1371 void evucomxsh(XMMRegister dst, XMMRegister src) { Assembler::evucomxsh(dst, src); }
1372 void evucomxsh(XMMRegister dst, Address src) { Assembler::evucomxsh(dst, src); }
1373 void evucomxsh(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1374
1375 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1376 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1377 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1378
1379 void evucomxss(XMMRegister dst, XMMRegister src) { Assembler::evucomxss(dst, src); }
1380 void evucomxss(XMMRegister dst, Address src) { Assembler::evucomxss(dst, src); }
1381 void evucomxss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1382
1383 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1384 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1385 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1386
1387 void evucomxsd(XMMRegister dst, XMMRegister src) { Assembler::evucomxsd(dst, src); }
1388 void evucomxsd(XMMRegister dst, Address src) { Assembler::evucomxsd(dst, src); }
1389 void evucomxsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1390
1391 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1392 void xorpd(XMMRegister dst, XMMRegister src);
1393 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1394 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1395
1396 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1397 void xorps(XMMRegister dst, XMMRegister src);
1398 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1399 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1400
1401 // Shuffle Bytes
1402 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1403 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1404 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1405 // AVX 3-operands instructions
1406
1407 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1408 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1409 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1410
1411 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1412 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1413 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1414
1415 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1416 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
1417
1418 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1419 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1420 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1421
1422 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1423 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1424
1425 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1426 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1427 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1428
1429 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1430 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1431 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1432
1433 using Assembler::vpbroadcastd;
1434 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1435
1436 using Assembler::vpbroadcastq;
1437 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
1438
1439 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1440 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);
1441
1442 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1443 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1444 using Assembler::evpcmpeqd;
1445 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1446
1447 // Vector compares
1448 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1449 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
1450 }
1451 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1452
1453 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1454 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
1455 }
1456 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1457
1458 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1459 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
1460 }
1461 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1462
1463 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
1464 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
1465 }
1466 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);
1467
1468 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
1469
1470 // Emit comparison instruction for the specified comparison predicate.
1471 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
1472 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1473
1474 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1475 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1476
1477 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1478
1479 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1480 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1481
1482 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1483 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
1484 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1485
1486 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }
1487
1488 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1489 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1490
1491 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1492 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1493
1494 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1495 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1496
1497 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1498 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1499
1500 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1501 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1502
1503 using Assembler::evpsllw;
1504 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1505 if (!is_varshift) {
1506 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
1507 } else {
1508 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
1509 }
1510 }
1511 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1512 if (!is_varshift) {
1513 Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
1514 } else {
1515 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
1516 }
1517 }
1518 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1519 if (!is_varshift) {
1520 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
1521 } else {
1522 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
1523 }
1524 }
1525 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1526 if (!is_varshift) {
1527 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
1528 } else {
1529 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
1530 }
1531 }
1532 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1533 if (!is_varshift) {
1534 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
1535 } else {
1536 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
1537 }
1538 }
1539
1540 using Assembler::evpsrlq;
1541 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1542 if (!is_varshift) {
1543 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
1544 } else {
1545 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
1546 }
1547 }
1548 using Assembler::evpsraw;
1549 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1550 if (!is_varshift) {
1551 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
1552 } else {
1553 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
1554 }
1555 }
1556 using Assembler::evpsrad;
1557 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1558 if (!is_varshift) {
1559 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
1560 } else {
1561 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
1562 }
1563 }
1564 using Assembler::evpsraq;
1565 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
1566 if (!is_varshift) {
1567 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
1568 } else {
1569 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
1570 }
1571 }
1572
1573 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1574 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1575 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1576 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1577
1578 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1579 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1580 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1581 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1582
1583 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1584 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1585
1586 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1587 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1588
1589 void vptest(XMMRegister dst, XMMRegister src);
1590 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1591
1592 void punpcklbw(XMMRegister dst, XMMRegister src);
1593 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1594
1595 void pshufd(XMMRegister dst, Address src, int mode);
1596 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1597
1598 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1599 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1600
1601 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1602 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1603 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1604
1605 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1606 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1607 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1608
1609 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1610
1611 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1612 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1613 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1614
1615 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1616 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1617 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1618
1619 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1620 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1621 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1622
1623 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1624 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1625 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1626
1627 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1628 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1629 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1630
1631 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1632 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1633 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1634
1635 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1636 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
1637
1638 // AVX Vector instructions
1639
1640 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1641 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1642 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1643
1644 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1645 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1646 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1647
1648 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1649 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1650 Assembler::vpxor(dst, nds, src, vector_len);
1651 else
1652 Assembler::vxorpd(dst, nds, src, vector_len);
1653 }
1654 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1655 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1656 Assembler::vpxor(dst, nds, src, vector_len);
1657 else
1658 Assembler::vxorpd(dst, nds, src, vector_len);
1659 }
1660 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1661
1662 // Simple version for AVX2 256bit vectors
1663 void vpxor(XMMRegister dst, XMMRegister src) {
1664 assert(UseAVX >= 2, "Should be at least AVX2");
1665 Assembler::vpxor(dst, dst, src, AVX_256bit);
1666 }
1667 void vpxor(XMMRegister dst, Address src) {
1668 assert(UseAVX >= 2, "Should be at least AVX2");
1669 Assembler::vpxor(dst, dst, src, AVX_256bit);
1670 }
1671
1672 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1673 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1674
1675 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1676 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1677 Assembler::vinserti32x4(dst, nds, src, imm8);
1678 } else if (UseAVX > 1) {
1679 // vinserti128 is available only in AVX2
1680 Assembler::vinserti128(dst, nds, src, imm8);
1681 } else {
1682 Assembler::vinsertf128(dst, nds, src, imm8);
1683 }
1684 }
1685
1686 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1687 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1688 Assembler::vinserti32x4(dst, nds, src, imm8);
1689 } else if (UseAVX > 1) {
1690 // vinserti128 is available only in AVX2
1691 Assembler::vinserti128(dst, nds, src, imm8);
1692 } else {
1693 Assembler::vinsertf128(dst, nds, src, imm8);
1694 }
1695 }
1696
1697 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1698 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1699 Assembler::vextracti32x4(dst, src, imm8);
1700 } else if (UseAVX > 1) {
1701 // vextracti128 is available only in AVX2
1702 Assembler::vextracti128(dst, src, imm8);
1703 } else {
1704 Assembler::vextractf128(dst, src, imm8);
1705 }
1706 }
1707
1708 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1709 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1710 Assembler::vextracti32x4(dst, src, imm8);
1711 } else if (UseAVX > 1) {
1712 // vextracti128 is available only in AVX2
1713 Assembler::vextracti128(dst, src, imm8);
1714 } else {
1715 Assembler::vextractf128(dst, src, imm8);
1716 }
1717 }
1718
1719 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1720 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1721 vinserti128(dst, dst, src, 1);
1722 }
1723 void vinserti128_high(XMMRegister dst, Address src) {
1724 vinserti128(dst, dst, src, 1);
1725 }
1726 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1727 vextracti128(dst, src, 1);
1728 }
1729 void vextracti128_high(Address dst, XMMRegister src) {
1730 vextracti128(dst, src, 1);
1731 }
1732
1733 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1734 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1735 Assembler::vinsertf32x4(dst, dst, src, 1);
1736 } else {
1737 Assembler::vinsertf128(dst, dst, src, 1);
1738 }
1739 }
1740
1741 void vinsertf128_high(XMMRegister dst, Address src) {
1742 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1743 Assembler::vinsertf32x4(dst, dst, src, 1);
1744 } else {
1745 Assembler::vinsertf128(dst, dst, src, 1);
1746 }
1747 }
1748
1749 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1750 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1751 Assembler::vextractf32x4(dst, src, 1);
1752 } else {
1753 Assembler::vextractf128(dst, src, 1);
1754 }
1755 }
1756
1757 void vextractf128_high(Address dst, XMMRegister src) {
1758 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1759 Assembler::vextractf32x4(dst, src, 1);
1760 } else {
1761 Assembler::vextractf128(dst, src, 1);
1762 }
1763 }
1764
1765 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1766 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1767 Assembler::vinserti64x4(dst, dst, src, 1);
1768 }
1769 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1770 Assembler::vinsertf64x4(dst, dst, src, 1);
1771 }
1772 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1773 Assembler::vextracti64x4(dst, src, 1);
1774 }
1775 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1776 Assembler::vextractf64x4(dst, src, 1);
1777 }
1778 void vextractf64x4_high(Address dst, XMMRegister src) {
1779 Assembler::vextractf64x4(dst, src, 1);
1780 }
1781 void vinsertf64x4_high(XMMRegister dst, Address src) {
1782 Assembler::vinsertf64x4(dst, dst, src, 1);
1783 }
1784
1785 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1786 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1787 vinserti128(dst, dst, src, 0);
1788 }
1789 void vinserti128_low(XMMRegister dst, Address src) {
1790 vinserti128(dst, dst, src, 0);
1791 }
1792 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1793 vextracti128(dst, src, 0);
1794 }
1795 void vextracti128_low(Address dst, XMMRegister src) {
1796 vextracti128(dst, src, 0);
1797 }
1798
1799 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1800 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1801 Assembler::vinsertf32x4(dst, dst, src, 0);
1802 } else {
1803 Assembler::vinsertf128(dst, dst, src, 0);
1804 }
1805 }
1806
1807 void vinsertf128_low(XMMRegister dst, Address src) {
1808 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1809 Assembler::vinsertf32x4(dst, dst, src, 0);
1810 } else {
1811 Assembler::vinsertf128(dst, dst, src, 0);
1812 }
1813 }
1814
1815 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1816 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1817 Assembler::vextractf32x4(dst, src, 0);
1818 } else {
1819 Assembler::vextractf128(dst, src, 0);
1820 }
1821 }
1822
1823 void vextractf128_low(Address dst, XMMRegister src) {
1824 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1825 Assembler::vextractf32x4(dst, src, 0);
1826 } else {
1827 Assembler::vextractf128(dst, src, 0);
1828 }
1829 }
1830
1831 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1832 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1833 Assembler::vinserti64x4(dst, dst, src, 0);
1834 }
1835 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1836 Assembler::vinsertf64x4(dst, dst, src, 0);
1837 }
1838 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1839 Assembler::vextracti64x4(dst, src, 0);
1840 }
1841 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1842 Assembler::vextractf64x4(dst, src, 0);
1843 }
1844 void vextractf64x4_low(Address dst, XMMRegister src) {
1845 Assembler::vextractf64x4(dst, src, 0);
1846 }
1847 void vinsertf64x4_low(XMMRegister dst, Address src) {
1848 Assembler::vinsertf64x4(dst, dst, src, 0);
1849 }
1850
1851 // Carry-Less Multiplication Quadword
1852 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1853 // 0x00 - multiply lower 64 bits [0:63]
1854 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1855 }
1856 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1857 // 0x11 - multiply upper 64 bits [64:127]
1858 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1859 }
1860 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1861 // 0x10 - multiply nds[0:63] and src[64:127]
1862 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1863 }
1864 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1865 //0x01 - multiply nds[64:127] and src[0:63]
1866 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1867 }
1868
1869 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1870 // 0x00 - multiply lower 64 bits [0:63]
1871 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1872 }
1873 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1874 // 0x11 - multiply upper 64 bits [64:127]
1875 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1876 }
1877
1878 // AVX-512 mask operations.
1879 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
1880 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1881 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
1882 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
1883 void kortest(uint masklen, KRegister src1, KRegister src2);
1884 void ktest(uint masklen, KRegister src1, KRegister src2);
1885
1886 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1887 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1888
1889 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1890 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1891
1892 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1893 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1894
1895 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
1896 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
1897
1898 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1899 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1900 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
1901 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
1902
1903 using Assembler::evpandq;
1904 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1905
1906 using Assembler::evpaddq;
1907 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
1908
1909 using Assembler::evporq;
1910 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1911
1912 using Assembler::vpshufb;
1913 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1914
1915 using Assembler::vpor;
1916 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);
1917
1918 using Assembler::vpternlogq;
1919 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
1920
1921 void cmov32( Condition cc, Register dst, Address src);
1922 void cmov32( Condition cc, Register dst, Register src);
1923
1924 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1925
1926 void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); }
1927 void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }
1928
1929 void movoop(Register dst, jobject obj);
1930 void movoop(Address dst, jobject obj, Register rscratch);
1931
1932 void mov_metadata(Register dst, Metadata* obj);
1933 void mov_metadata(Address dst, Metadata* obj, Register rscratch);
1934
1935 void mov64(Register dst, int64_t imm64);
1936 void mov64(Register dst, int64_t imm64, relocInfo::relocType rtype, int format);
1937
1938 void movptr(Register dst, Register src);
1939 void movptr(Register dst, Address src);
1940 void movptr(Register dst, AddressLiteral src);
1941 void movptr(Register dst, ArrayAddress src);
1942 void movptr(Register dst, intptr_t src);
1943 void movptr(Address dst, Register src);
1944 void movptr(Address dst, int32_t imm);
1945 void movptr(Address dst, intptr_t src, Register rscratch);
1946 void movptr(ArrayAddress dst, Register src, Register rscratch);
1947
1948 void movptr(Register dst, RegisterOrConstant src) {
1949 if (src.is_constant()) movptr(dst, src.as_constant());
1950 else movptr(dst, src.as_register());
1951 }
1952
1953
1954 // to avoid hiding movl
1955 void mov32(Register dst, AddressLiteral src);
1956 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);
1957
1958 // Import other mov() methods from the parent class or else
1959 // they will be hidden by the following overriding declaration.
1960 using Assembler::movdl;
1961 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1962
1963 using Assembler::movq;
1964 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
1965
1966 // Can push value or effective address
1967 void pushptr(AddressLiteral src, Register rscratch);
1968
1969 void pushptr(Address src) { pushq(src); }
1970 void popptr(Address src) { popq(src); }
1971
1972 void pushoop(jobject obj, Register rscratch);
1973 void pushklass(Metadata* obj, Register rscratch);
1974
1975 // sign extend as need a l to ptr sized element
1976 void movl2ptr(Register dst, Address src) { movslq(dst, src); }
1977 void movl2ptr(Register dst, Register src) { movslq(dst, src); }
1978
1979
1980 public:
1981 // Inline type specific methods
1982 #include "asm/macroAssembler_common.hpp"
1983
1984 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
1985 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
1986 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
1987 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
1988 RegState reg_state[]);
1989 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
1990 VMRegPair* from, int from_count, int& from_index, VMReg to,
1991 RegState reg_state[], Register val_array);
1992 int extend_stack_for_inline_args(int args_on_stack);
1993 void remove_frame(int initial_framesize, bool needs_stack_repair);
1994 VMReg spill_reg_for(VMReg reg);
1995
1996 // clear memory of size 'cnt' qwords, starting at 'base';
1997 // if 'is_large' is set, do not try to produce short loop
1998 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
1999
2000 // clear memory initialization sequence for constant size;
2001 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2002
2003 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2004 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2005
2006 // Fill primitive arrays
2007 void generate_fill(BasicType t, bool aligned,
2008 Register to, Register value, Register count,
2009 Register rtmp, XMMRegister xtmp);
2010
2011 void encode_iso_array(Register src, Register dst, Register len,
2012 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2013 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2014
2015 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2016 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2017 Register y, Register y_idx, Register z,
2018 Register carry, Register product,
2019 Register idx, Register kdx);
2020 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
2021 Register yz_idx, Register idx,
2022 Register carry, Register product, int offset);
2023 void multiply_128_x_128_bmi2_loop(Register y, Register z,
2024 Register carry, Register carry2,
2025 Register idx, Register jdx,
2026 Register yz_idx1, Register yz_idx2,
2027 Register tmp, Register tmp3, Register tmp4);
2028 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
2029 Register yz_idx, Register idx, Register jdx,
2030 Register carry, Register product,
2031 Register carry2);
2032 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
2033 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
2034 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
2035 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2036 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
2037 Register tmp2);
2038 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
2039 Register rdxReg, Register raxReg);
2040 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
2041 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2042 Register tmp3, Register tmp4);
2043 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
2044 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
2045
2046 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
2047 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2048 Register raxReg);
2049 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
2050 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
2051 Register raxReg);
2052 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
2053 Register result, Register tmp1, Register tmp2,
2054 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
2055
2056 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
2057 void update_byte_crc32(Register crc, Register val, Register table);
2058 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
2059
2060 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
2061 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
2062 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
2063 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
2064
2065 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
2066 // Note on a naming convention:
2067 // Prefix w = register only used on a Westmere+ architecture
2068 // Prefix n = register only used on a Nehalem architecture
2069 void crc32c_ipl_alg4(Register in_out, uint32_t n,
2070 Register tmp1, Register tmp2, Register tmp3);
2071 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
2072 Register in_out,
2073 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
2074 XMMRegister w_xtmp2,
2075 Register tmp1,
2076 Register n_tmp2, Register n_tmp3);
2077 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
2078 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2079 Register tmp1, Register tmp2,
2080 Register n_tmp3);
2081 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
2082 Register in_out1, Register in_out2, Register in_out3,
2083 Register tmp1, Register tmp2, Register tmp3,
2084 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2085 Register tmp4, Register tmp5,
2086 Register n_tmp6);
2087 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
2088 Register tmp1, Register tmp2, Register tmp3,
2089 Register tmp4, Register tmp5, Register tmp6,
2090 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
2091 bool is_pclmulqdq_supported);
2092 // Fold 128-bit data chunk
2093 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
2094 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
2095 // Fold 512-bit data chunk
2096 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
2097 // Fold 8-bit data
2098 void fold_8bit_crc32(Register crc, Register table, Register tmp);
2099 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
2100
2101 // Compress char[] array to byte[].
2102 void char_array_compress(Register src, Register dst, Register len,
2103 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2104 XMMRegister tmp4, Register tmp5, Register result,
2105 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
2106
2107 // Inflate byte[] array to char[].
2108 void byte_array_inflate(Register src, Register dst, Register len,
2109 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
2110
2111 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
2112 Register length, Register temp, int vec_enc);
2113
2114 void fill64_masked(uint shift, Register dst, int disp,
2115 XMMRegister xmm, KRegister mask, Register length,
2116 Register temp, bool use64byteVector = false);
2117
2118 void fill32_masked(uint shift, Register dst, int disp,
2119 XMMRegister xmm, KRegister mask, Register length,
2120 Register temp);
2121
2122 void fill32(Address dst, XMMRegister xmm);
2123
2124 void fill32(Register dst, int disp, XMMRegister xmm);
2125
2126 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
2127
2128 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);
2129
2130 void convert_f2i(Register dst, XMMRegister src);
2131 void convert_d2i(Register dst, XMMRegister src);
2132 void convert_f2l(Register dst, XMMRegister src);
2133 void convert_d2l(Register dst, XMMRegister src);
2134 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
2135 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);
2136
2137 void cache_wb(Address line);
2138 void cache_wbsync(bool is_pre);
2139
2140 #ifdef COMPILER2_OR_JVMCI
2141 void generate_fill_avx3(BasicType type, Register to, Register value,
2142 Register count, Register rtmp, XMMRegister xtmp);
2143 #endif // COMPILER2_OR_JVMCI
2144
2145 void vallones(XMMRegister dst, int vector_len);
2146
2147 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
2148
2149 void fast_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
2150 void fast_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);
2151
2152 void save_legacy_gprs();
2153 void restore_legacy_gprs();
2154 void load_aotrc_address(Register reg, address a);
2155 void setcc(Assembler::Condition comparison, Register dst);
2156 };
2157
2158 #endif // CPU_X86_MACROASSEMBLER_X86_HPP