1 /*
2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "code/vmreg.inline.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "utilities/macros.hpp"
32 #include "runtime/rtmLocking.hpp"
33 #include "runtime/vm_version.hpp"
34
35 // MacroAssembler extends Assembler by frequently used macros.
36 //
37 // Instructions for which a 'better' code sequence exists depending
38 // on arguments should also go in here.
39
40 class MacroAssembler: public Assembler {
41 friend class LIR_Assembler;
42 friend class Runtime1; // as_Address()
43
44 public:
45 // Support for VM calls
46 //
47 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
48 // may customize this version by overriding it for its purposes (e.g., to save/restore
49 // additional registers when doing a VM call).
50
51 virtual void call_VM_leaf_base(
52 address entry_point, // the entry point
53 int number_of_arguments // the number of arguments to pop after the call
54 );
55
56 protected:
57 // This is the base routine called by the different versions of call_VM. The interpreter
58 // may customize this version by overriding it for its purposes (e.g., to save/restore
59 // additional registers when doing a VM call).
60 //
61   // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
62   // returns the register which contains the thread upon return. If a thread register has been
63   // specified, the return value will correspond to that register. If no last_java_sp is specified
64   // (noreg) then rsp will be used instead.
65 virtual void call_VM_base( // returns the register containing the thread upon return
66 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
67 Register java_thread, // the thread if computed before ; use noreg otherwise
68 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
69 address entry_point, // the entry point
70 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
71 bool check_exceptions // whether to check for pending exceptions after return
72 );
73
74 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
75
76 // helpers for FPU flag access
77 // tmp is a temporary register, if none is available use noreg
78 void save_rax (Register tmp);
79 void restore_rax(Register tmp);
80
81 public:
82 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
83
84 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
85 // The implementation is only non-empty for the InterpreterMacroAssembler,
86 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
87 virtual void check_and_handle_popframe(Register java_thread);
88 virtual void check_and_handle_earlyret(Register java_thread);
89
90 Address as_Address(AddressLiteral adr);
91 Address as_Address(ArrayAddress adr);
92
93 // Support for NULL-checks
94 //
95 // Generates code that causes a NULL OS exception if the content of reg is NULL.
96 // If the accessed location is M[reg + offset] and the offset is known, provide the
97 // offset. No explicit code generation is needed if the offset is within a certain
98 // range (0 <= offset <= page_size).
99
100 void null_check(Register reg, int offset = -1);
101 static bool needs_explicit_null_check(intptr_t offset);
102 static bool uses_implicit_null_check(void* address);
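
  // Usage sketch (illustrative, not part of the original interface): with a small,
  // known field offset the later memory access itself provokes the OS trap, so no
  // extra code is needed; with an unknown offset an explicit poke of M[reg] is emitted.
  //
  //   null_check(rbx, oopDesc::klass_offset_in_bytes());  // implicit check is enough
  //   null_check(rbx);                                     // offset unknown: explicit access of M[rbx]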
103
104 // Required platform-specific helpers for Label::patch_instructions.
105 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
106 void pd_patch_instruction(address branch, address target, const char* file, int line) {
107 unsigned char op = branch[0];
108 assert(op == 0xE8 /* call */ ||
109 op == 0xE9 /* jmp */ ||
110 op == 0xEB /* short jmp */ ||
111 (op & 0xF0) == 0x70 /* short jcc */ ||
112            (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
113            (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
114 "Invalid opcode at patch point");
115
116 if (op == 0xEB || (op & 0xF0) == 0x70) {
117 // short offset operators (jmp and jcc)
118 char* disp = (char*) &branch[1];
119 int imm8 = target - (address) &disp[1];
120 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
121 file == NULL ? "<NULL>" : file, line);
122 *disp = imm8;
123 } else {
124 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
125 int imm32 = target - (address) &disp[1];
126 *disp = imm32;
127 }
128 }
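
  // Worked example (a sketch, not normative): for a 5-byte near jmp (opcode 0xE9)
  // at 'branch', the code above effectively stores
  //
  //   int* disp = (int*) &branch[1];        // 32-bit displacement field
  //   *disp     = target - (branch + 5);    // relative to the next instruction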
129
130 // The following 4 methods return the offset of the appropriate move instruction
131
132 // Support for fast byte/short loading with zero extension (depending on particular CPU)
133 int load_unsigned_byte(Register dst, Address src);
134 int load_unsigned_short(Register dst, Address src);
135
136 // Support for fast byte/short loading with sign extension (depending on particular CPU)
137 int load_signed_byte(Register dst, Address src);
138 int load_signed_short(Register dst, Address src);
139
140 // Support for sign-extension (hi:lo = extend_sign(lo))
141 void extend_sign(Register hi, Register lo);
142
143   // Load and store values by size and signedness
144 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
145 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
146
147 // Support for inc/dec with optimal instruction selection depending on value
148
149 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
150 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
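
  // Illustrative selection (the exact policy lives in the .cpp file): increment(rbx)
  // typically emits inc when UseIncDec is set, while increment(rbx, 8) falls back to
  // an add with an immediate; a value of 0 emits nothing.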
151
152 void decrementl(Address dst, int value = 1);
153 void decrementl(Register reg, int value = 1);
154
155 void decrementq(Register reg, int value = 1);
156 void decrementq(Address dst, int value = 1);
157
158 void incrementl(Address dst, int value = 1);
159 void incrementl(Register reg, int value = 1);
160
161 void incrementq(Register reg, int value = 1);
162 void incrementq(Address dst, int value = 1);
163
164 // Support optimal SSE move instructions.
165 void movflt(XMMRegister dst, XMMRegister src) {
166     if (dst->encoding() == src->encoding()) return;
167 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
168 else { movss (dst, src); return; }
169 }
170 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
171 void movflt(XMMRegister dst, AddressLiteral src);
172 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
173
174 // Move with zero extension
175 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
176
177 void movdbl(XMMRegister dst, XMMRegister src) {
178     if (dst->encoding() == src->encoding()) return;
179 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
180 else { movsd (dst, src); return; }
181 }
182
183 void movdbl(XMMRegister dst, AddressLiteral src);
184
185 void movdbl(XMMRegister dst, Address src) {
186 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
187 else { movlpd(dst, src); return; }
188 }
189 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
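
  // Rationale sketch: the reg-to-reg movaps/movapd forms copy the whole register and
  // so avoid the partial-register dependency that movss/movsd introduce on the
  // destination's untouched upper bits; UseXmmRegToRegMoveAll selects them on CPUs
  // where the full-width copy is at least as cheap. Likewise, UseXmmLoadAndClearUpper
  // prefers movsd loads, which zero the upper half instead of merging into it.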
190
191 void incrementl(AddressLiteral dst);
192 void incrementl(ArrayAddress dst);
193
194 void incrementq(AddressLiteral dst);
195
196 // Alignment
197 void align32();
198 void align64();
199 void align(int modulus);
200 void align(int modulus, int target);
201
202 // A 5 byte nop that is safe for patching (see patch_verified_entry)
203 void fat_nop();
204
205 // Stack frame creation/removal
206 void enter();
207 void leave();
208
209   // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
210 // The pointer will be loaded into the thread register.
211 void get_thread(Register thread);
212
213 #ifdef _LP64
214 // Support for argument shuffling
215
216 void move32_64(VMRegPair src, VMRegPair dst);
217 void long_move(VMRegPair src, VMRegPair dst);
218 void float_move(VMRegPair src, VMRegPair dst);
219 void double_move(VMRegPair src, VMRegPair dst);
220 void move_ptr(VMRegPair src, VMRegPair dst);
221 void object_move(OopMap* map,
222 int oop_handle_offset,
223 int framesize_in_slots,
224 VMRegPair src,
225 VMRegPair dst,
226 bool is_receiver,
227 int* receiver_offset);
228 #endif // _LP64
229
230 // Support for VM calls
231 //
232   // It is imperative that all calls into the VM are handled via the call_VM macros.
233   // They make sure that the stack linkage is set up correctly. call_VM's correspond
234   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
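
  // A minimal usage sketch (the runtime entry point below is hypothetical, not part
  // of this header): from interpreter or stub code one would typically write
  //
  //   call_VM(rax,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),  // hypothetical entry
  //           rbx /* arg_1 */);
  //
  // which sets up last_Java_frame, passes the current thread as the implicit first
  // C argument, and (by default) checks for pending exceptions on return.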
235
236
237 void call_VM(Register oop_result,
238 address entry_point,
239 bool check_exceptions = true);
240 void call_VM(Register oop_result,
241 address entry_point,
242 Register arg_1,
243 bool check_exceptions = true);
244 void call_VM(Register oop_result,
245 address entry_point,
246 Register arg_1, Register arg_2,
247 bool check_exceptions = true);
248 void call_VM(Register oop_result,
249 address entry_point,
250 Register arg_1, Register arg_2, Register arg_3,
251 bool check_exceptions = true);
252
253 // Overloadings with last_Java_sp
254 void call_VM(Register oop_result,
255 Register last_java_sp,
256 address entry_point,
257 int number_of_arguments = 0,
258 bool check_exceptions = true);
259 void call_VM(Register oop_result,
260 Register last_java_sp,
261 address entry_point,
262                Register arg_1,
263                bool check_exceptions = true);
264 void call_VM(Register oop_result,
265 Register last_java_sp,
266 address entry_point,
267 Register arg_1, Register arg_2,
268 bool check_exceptions = true);
269 void call_VM(Register oop_result,
270 Register last_java_sp,
271 address entry_point,
272 Register arg_1, Register arg_2, Register arg_3,
273 bool check_exceptions = true);
274
275 void get_vm_result (Register oop_result, Register thread);
276 void get_vm_result_2(Register metadata_result, Register thread);
277
278 // These always tightly bind to MacroAssembler::call_VM_base
279 // bypassing the virtual implementation
280 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
281 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
282 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
283 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
284 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
285
286 void call_VM_leaf0(address entry_point);
287 void call_VM_leaf(address entry_point,
288 int number_of_arguments = 0);
289 void call_VM_leaf(address entry_point,
290 Register arg_1);
291 void call_VM_leaf(address entry_point,
292 Register arg_1, Register arg_2);
293 void call_VM_leaf(address entry_point,
294 Register arg_1, Register arg_2, Register arg_3);
295
296 // These always tightly bind to MacroAssembler::call_VM_leaf_base
297 // bypassing the virtual implementation
298 void super_call_VM_leaf(address entry_point);
299 void super_call_VM_leaf(address entry_point, Register arg_1);
300 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
301 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
302 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
303
304 // last Java Frame (fills frame anchor)
305 void set_last_Java_frame(Register thread,
306 Register last_java_sp,
307 Register last_java_fp,
308 address last_java_pc);
309
310 // thread in the default location (r15_thread on 64bit)
311 void set_last_Java_frame(Register last_java_sp,
312 Register last_java_fp,
313 address last_java_pc);
314
315 void reset_last_Java_frame(Register thread, bool clear_fp);
316
317 // thread in the default location (r15_thread on 64bit)
318 void reset_last_Java_frame(bool clear_fp);
319
320 // jobjects
321 void clear_jweak_tag(Register possibly_jweak);
322 void resolve_jobject(Register value, Register thread, Register tmp);
323
324 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
325 void c2bool(Register x);
326
327 // C++ bool manipulation
328
329 void movbool(Register dst, Address src);
330 void movbool(Address dst, bool boolconst);
331 void movbool(Address dst, Register src);
332 void testbool(Register dst);
333
334 void resolve_oop_handle(Register result, Register tmp = rscratch2);
335 void resolve_weak_handle(Register result, Register tmp);
336 void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
337 void load_method_holder_cld(Register rresult, Register rmethod);
338
339 void load_method_holder(Register holder, Register method);
340
341 // oop manipulations
342 void load_klass(Register dst, Register src, Register tmp);
343 void store_klass(Register dst, Register src, Register tmp);
344
345 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
346 Register tmp1, Register thread_tmp);
347 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
348 Register tmp1, Register tmp2);
349
350 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
351 Register thread_tmp = noreg, DecoratorSet decorators = 0);
352 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
353 Register thread_tmp = noreg, DecoratorSet decorators = 0);
354 void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
355 Register tmp2 = noreg, DecoratorSet decorators = 0);
356
357 // Used for storing NULL. All other oop constants should be
358 // stored using routines that take a jobject.
359 void store_heap_oop_null(Address dst);
360
361 void load_prototype_header(Register dst, Register src, Register tmp);
362
363 #ifdef _LP64
364 void store_klass_gap(Register dst, Register src);
365
366 // This dummy is to prevent a call to store_heap_oop from
367 // converting a zero (like NULL) into a Register by giving
368 // the compiler two choices it can't resolve
369
370 void store_heap_oop(Address dst, void* dummy);
371
372 void encode_heap_oop(Register r);
373 void decode_heap_oop(Register r);
374 void encode_heap_oop_not_null(Register r);
375 void decode_heap_oop_not_null(Register r);
376 void encode_heap_oop_not_null(Register dst, Register src);
377 void decode_heap_oop_not_null(Register dst, Register src);
378
379 void set_narrow_oop(Register dst, jobject obj);
380 void set_narrow_oop(Address dst, jobject obj);
381 void cmp_narrow_oop(Register dst, jobject obj);
382 void cmp_narrow_oop(Address dst, jobject obj);
383
384 void encode_klass_not_null(Register r, Register tmp);
385 void decode_klass_not_null(Register r, Register tmp);
386 void encode_and_move_klass_not_null(Register dst, Register src);
387 void decode_and_move_klass_not_null(Register dst, Register src);
388 void set_narrow_klass(Register dst, Klass* k);
389 void set_narrow_klass(Address dst, Klass* k);
390 void cmp_narrow_klass(Register dst, Klass* k);
391 void cmp_narrow_klass(Address dst, Klass* k);
392
393 // if heap base register is used - reinit it with the correct value
394 void reinit_heapbase();
395
396 DEBUG_ONLY(void verify_heapbase(const char* msg);)
397
398 #endif // _LP64
399
400 // Int division/remainder for Java
401 // (as idivl, but checks for special case as described in JVM spec.)
402 // returns idivl instruction offset for implicit exception handling
403 int corrected_idivl(Register reg);
404
405 // Long division/remainder for Java
406 // (as idivq, but checks for special case as described in JVM spec.)
407 // returns idivq instruction offset for implicit exception handling
408 int corrected_idivq(Register reg);
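
  // The special case being guarded against: on x86, idiv of min_jint by -1 (min_jlong
  // by -1 for idivq) raises #DE, whereas the JVM spec requires the quotient to be the
  // dividend (min_jint / min_jlong) with remainder 0, so that operand pair is handled
  // separately before the divide.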
409
410 void int3();
411
412 // Long operation macros for a 32bit cpu
413 // Long negation for Java
414 void lneg(Register hi, Register lo);
415
416 // Long multiplication for Java
417 // (destroys contents of eax, ebx, ecx and edx)
418 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
419
420 // Long shifts for Java
421 // (semantics as described in JVM spec.)
422 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
423 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
424
425 // Long compare for Java
426 // (semantics as described in JVM spec.)
427 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
428
429
430 // misc
431
432 // Sign extension
433 void sign_extend_short(Register reg);
434 void sign_extend_byte(Register reg);
435
436 // Division by power of 2, rounding towards 0
437 void division_with_shift(Register reg, int shift_value);
438
439 #ifndef _LP64
440 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
441 //
442 // CF (corresponds to C0) if x < y
443 // PF (corresponds to C2) if unordered
444 // ZF (corresponds to C3) if x = y
445 //
446 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
447 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
448 void fcmp(Register tmp);
449 // Variant of the above which allows y to be further down the stack
450 // and which only pops x and y if specified. If pop_right is
451 // specified then pop_left must also be specified.
452 void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
453
454 // Floating-point comparison for Java
455 // Compares the top-most stack entries on the FPU stack and stores the result in dst.
456 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
457 // (semantics as described in JVM spec.)
458 void fcmp2int(Register dst, bool unordered_is_less);
459 // Variant of the above which allows y to be further down the stack
460 // and which only pops x and y if specified. If pop_right is
461 // specified then pop_left must also be specified.
462 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
463
464 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
465 // tmp is a temporary register, if none is available use noreg
466 void fremr(Register tmp);
467
468 // only if +VerifyFPU
469 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
470 #endif // !_LP64
471
472 // dst = c = a * b + c
473 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
474 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
475
476 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
477 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
478 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
479 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
480
481
482 // same as fcmp2int, but using SSE2
483 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
484 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
485
486 // branch to L if FPU flag C2 is set/not set
487 // tmp is a temporary register, if none is available use noreg
488 void jC2 (Register tmp, Label& L);
489 void jnC2(Register tmp, Label& L);
490
491 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
492 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
493 void load_float(Address src);
494
495 // Store float value to 'address'. If UseSSE >= 1, the value is stored
496 // from register xmm0. Otherwise, the value is stored from the FPU stack.
497 void store_float(Address dst);
498
499 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
500 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
501 void load_double(Address src);
502
503 // Store double value to 'address'. If UseSSE >= 2, the value is stored
504 // from register xmm0. Otherwise, the value is stored from the FPU stack.
505 void store_double(Address dst);
506
507 #ifndef _LP64
508 // Pop ST (ffree & fincstp combined)
509 void fpop();
510
511 void empty_FPU_stack();
512 #endif // !_LP64
513
514 void push_IU_state();
515 void pop_IU_state();
516
517 void push_FPU_state();
518 void pop_FPU_state();
519
520 void push_CPU_state();
521 void pop_CPU_state();
522
523   // Round up to a multiple of modulus (modulus must be a power of two)
524 void round_to(Register reg, int modulus);
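
  // Equivalent arithmetic, assuming modulus is a power of two (a sketch of the intent):
  //   reg = (reg + modulus - 1) & ~(modulus - 1)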
525
526 // Callee saved registers handling
527 void push_callee_saved_registers();
528 void pop_callee_saved_registers();
529
530 // allocation
531 void eden_allocate(
532 Register thread, // Current thread
533 Register obj, // result: pointer to object after successful allocation
534 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
535 int con_size_in_bytes, // object size in bytes if known at compile time
536 Register t1, // temp register
537 Label& slow_case // continuation point if fast allocation fails
538 );
539 void tlab_allocate(
540 Register thread, // Current thread
541 Register obj, // result: pointer to object after successful allocation
542 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
543 int con_size_in_bytes, // object size in bytes if known at compile time
544 Register t1, // temp register
545 Register t2, // temp register
546 Label& slow_case // continuation point if fast allocation fails
547 );
548 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
549
550 // interface method calling
551 void lookup_interface_method(Register recv_klass,
552 Register intf_klass,
553 RegisterOrConstant itable_index,
554 Register method_result,
555 Register scan_temp,
556 Label& no_such_interface,
557 bool return_method = true);
558
559 // virtual method calling
560 void lookup_virtual_method(Register recv_klass,
561 RegisterOrConstant vtable_index,
562 Register method_result);
563
564 // Test sub_klass against super_klass, with fast and slow paths.
565
566 // The fast path produces a tri-state answer: yes / no / maybe-slow.
567 // One of the three labels can be NULL, meaning take the fall-through.
568 // If super_check_offset is -1, the value is loaded up from super_klass.
569 // No registers are killed, except temp_reg.
570 void check_klass_subtype_fast_path(Register sub_klass,
571 Register super_klass,
572 Register temp_reg,
573 Label* L_success,
574 Label* L_failure,
575 Label* L_slow_path,
576 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
577
578 // The rest of the type check; must be wired to a corresponding fast path.
579 // It does not repeat the fast path logic, so don't use it standalone.
580 // The temp_reg and temp2_reg can be noreg, if no temps are available.
581 // Updates the sub's secondary super cache as necessary.
582 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
583 void check_klass_subtype_slow_path(Register sub_klass,
584 Register super_klass,
585 Register temp_reg,
586 Register temp2_reg,
587 Label* L_success,
588 Label* L_failure,
589 bool set_cond_codes = false);
590
591 // Simplified, combined version, good for typical uses.
592 // Falls through on failure.
593 void check_klass_subtype(Register sub_klass,
594 Register super_klass,
595 Register temp_reg,
596 Label& L_success);
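
  // Roughly equivalent pairing of the two paths above (a sketch, not normative):
  //
  //   Label L_failure;
  //   check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,
  //                                 &L_success, &L_failure, NULL); // NULL: fall through to slow path
  //   check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg,
  //                                 &L_success, NULL);             // NULL: fall through on failure
  //   bind(L_failure);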
597
598 void clinit_barrier(Register klass,
599 Register thread,
600 Label* L_fast_path = NULL,
601 Label* L_slow_path = NULL);
602
603 // method handles (JSR 292)
604 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
605
606 // Debugging
607
608 // only if +VerifyOops
609 void _verify_oop(Register reg, const char* s, const char* file, int line);
610 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
611
612 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
613 if (VerifyOops) {
614 _verify_oop(reg, s, file, line);
615 }
616 }
617 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
618 if (VerifyOops) {
619 _verify_oop_addr(reg, s, file, line);
620 }
621 }
622
623 // TODO: verify method and klass metadata (compare against vptr?)
624 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
625 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
626
627 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
628 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
629 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
630 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
631 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
632
633 // Verify or restore cpu control state after JNI call
634 void restore_cpu_control_state_after_jni();
635
636 // prints msg, dumps registers and stops execution
637 void stop(const char* msg);
638
639 // prints msg and continues
640 void warn(const char* msg);
641
642 // dumps registers and other state
643 void print_state();
644
645 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
646 static void debug64(char* msg, int64_t pc, int64_t regs[]);
647 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
648 static void print_state64(int64_t pc, int64_t regs[]);
649
650 void os_breakpoint();
651
652 void untested() { stop("untested"); }
653
654 void unimplemented(const char* what = "");
655
656 void should_not_reach_here() { stop("should not reach here"); }
657
658 void print_CPU_state();
659
660 // Stack overflow checking
661 void bang_stack_with_offset(int offset) {
662 // stack grows down, caller passes positive offset
663     assert(offset > 0, "must bang with positive offset");
664 movl(Address(rsp, (-offset)), rax);
665 }
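
  // Sketch of a typical caller (the page count below is illustrative):
  //
  //   const int page_size = os::vm_page_size();
  //   for (int pages = 1; pages <= n_bang_pages; pages++) {
  //     bang_stack_with_offset(pages * page_size);   // stores to rsp - pages * page_size
  //   }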
666
667   // Writes to successive stack pages until the given offset is reached, to check for
668   // stack overflow and to touch the shadow pages. Also clobbers tmp.
669 void bang_stack_size(Register size, Register tmp);
670
671 // Check for reserved stack access in method being exited (for JIT)
672 void reserved_stack_check();
673
674 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
675
676 void verify_tlab();
677
678 // Biased locking support
679 // lock_reg and obj_reg must be loaded up with the appropriate values.
680 // swap_reg must be rax, and is killed.
681 // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
682 // be killed; if not supplied, push/pop will be used internally to
683 // allocate a temporary (inefficient, avoid if possible).
684 // Optional slow case is for implementations (interpreter and C1) which branch to
685 // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
686 void biased_locking_enter(Register lock_reg, Register obj_reg,
687 Register swap_reg, Register tmp_reg,
688 Register tmp_reg2, bool swap_reg_contains_mark,
689 Label& done, Label* slow_case = NULL,
690 BiasedLockingCounters* counters = NULL);
691 void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
692
693 Condition negate_condition(Condition cond);
694
695   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
696   // operands. In general the names are modified to avoid hiding the instruction in Assembler
697   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
698   // here in MacroAssembler. The major exception to this rule is call.
699
700 // Arithmetics
701
702
703 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
704 void addptr(Address dst, Register src);
705
706 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
707 void addptr(Register dst, int32_t src);
708 void addptr(Register dst, Register src);
709 void addptr(Register dst, RegisterOrConstant src) {
710 if (src.is_constant()) addptr(dst, (int) src.as_constant());
711 else addptr(dst, src.as_register());
712 }
713
714 void andptr(Register dst, int32_t src);
715 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
716
717 void cmp8(AddressLiteral src1, int imm);
718
719 // renamed to drag out the casting of address to int32_t/intptr_t
720 void cmp32(Register src1, int32_t imm);
721
722 void cmp32(AddressLiteral src1, int32_t imm);
723 // compare reg - mem, or reg - &mem
724 void cmp32(Register src1, AddressLiteral src2);
725
726 void cmp32(Register src1, Address src2);
727
728 #ifndef _LP64
729 void cmpklass(Address dst, Metadata* obj);
730 void cmpklass(Register dst, Metadata* obj);
731 void cmpoop(Address dst, jobject obj);
732 #endif // !_LP64
733
734 void cmpoop(Register src1, Register src2);
735 void cmpoop(Register src1, Address src2);
736 void cmpoop(Register dst, jobject obj);
737
738   // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
739 void cmpptr(Address src1, AddressLiteral src2);
740
741 void cmpptr(Register src1, AddressLiteral src2);
742
743 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
744 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
745 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
746
747 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
748 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
749
750   // cmp64 to avoid hiding cmpq
751 void cmp64(Register src1, AddressLiteral src);
752
753 void cmpxchgptr(Register reg, Address adr);
754
755 void locked_cmpxchgptr(Register reg, AddressLiteral adr);
756
757
758 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
759 void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
760
761
762 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
763
764 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
765
766 void shlptr(Register dst, int32_t shift);
767 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
768
769 void shrptr(Register dst, int32_t shift);
770 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
771
772 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
773 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
774
775 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
776
777 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
778 void subptr(Register dst, int32_t src);
779   // Force generation of a 4-byte immediate value even if it fits into 8 bits
780 void subptr_imm32(Register dst, int32_t src);
781 void subptr(Register dst, Register src);
782 void subptr(Register dst, RegisterOrConstant src) {
783 if (src.is_constant()) subptr(dst, (int) src.as_constant());
784 else subptr(dst, src.as_register());
785 }
786
787 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
788 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
789
790 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
791 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
792
793 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
794
795
796
797 // Helper functions for statistics gathering.
798 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
799 void cond_inc32(Condition cond, AddressLiteral counter_addr);
800 // Unconditional atomic increment.
801 void atomic_incl(Address counter_addr);
802 void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
803 #ifdef _LP64
804 void atomic_incq(Address counter_addr);
805 void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
806 #endif
807 void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
808 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
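
  // Example (the counter variable is hypothetical, for illustration only):
  //
  //   atomic_incl(ExternalAddress((address)&_some_counter), rscratch1);
  //
  // ExternalAddress is an AddressLiteral, so on 64-bit the scratch register may be
  // used to materialize the address when it is not reachable via a 32-bit displacement.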
809
810 void lea(Register dst, AddressLiteral adr);
811 void lea(Address dst, AddressLiteral adr);
812 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
813
814 void leal32(Register dst, Address src) { leal(dst, src); }
815
816 // Import other testl() methods from the parent class or else
817 // they will be hidden by the following overriding declaration.
818 using Assembler::testl;
819 void testl(Register dst, AddressLiteral src);
820
821 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
822 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
823 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
824 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
825
826 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
827 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
828 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
829 void testptr(Register src1, Register src2);
830
831 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
832 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
833
834 // Calls
835
836 void call(Label& L, relocInfo::relocType rtype);
837 void call(Register entry);
838 void call(Address addr) { Assembler::call(addr); }
839
840   // NOTE: this call transfers to the effective address of entry NOT
841   // the address contained by entry. This is because it is more natural
842   // for jumps/calls.
843 void call(AddressLiteral entry);
844
845 // Emit the CompiledIC call idiom
846 void ic_call(address entry, jint method_index = 0);
847
848 // Jumps
849
850   // NOTE: these jumps transfer to the effective address of dst NOT
851   // the address contained by dst. This is because it is more natural
852   // for jumps/calls.
853 void jump(AddressLiteral dst);
854 void jump_cc(Condition cc, AddressLiteral dst);
855
856   // 32bit can do a case table jump in one instruction but we no longer allow the base
857   // to be installed in the Address class. This jump transfers to the address
858   // contained in the location described by entry (not the address of entry).
859 void jump(ArrayAddress entry);
860
861 // Floating
862
863 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
864 void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
865 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
866
867 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
868 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
869 void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
870
871 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
872 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
873 void comiss(XMMRegister dst, AddressLiteral src);
874
875 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
876 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
877 void comisd(XMMRegister dst, AddressLiteral src);
878
879 #ifndef _LP64
880 void fadd_s(Address src) { Assembler::fadd_s(src); }
881 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
882
883 void fldcw(Address src) { Assembler::fldcw(src); }
884 void fldcw(AddressLiteral src);
885
886 void fld_s(int index) { Assembler::fld_s(index); }
887 void fld_s(Address src) { Assembler::fld_s(src); }
888 void fld_s(AddressLiteral src);
889
890 void fld_d(Address src) { Assembler::fld_d(src); }
891 void fld_d(AddressLiteral src);
892
893 void fmul_s(Address src) { Assembler::fmul_s(src); }
894 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
895 #endif // !_LP64
896
897 void fld_x(Address src) { Assembler::fld_x(src); }
898 void fld_x(AddressLiteral src);
899
900 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
901 void ldmxcsr(AddressLiteral src);
902
903 #ifdef _LP64
904 private:
905 void sha256_AVX2_one_round_compute(
906 Register reg_old_h,
907 Register reg_a,
908 Register reg_b,
909 Register reg_c,
910 Register reg_d,
911 Register reg_e,
912 Register reg_f,
913 Register reg_g,
914 Register reg_h,
915 int iter);
916 void sha256_AVX2_four_rounds_compute_first(int start);
917 void sha256_AVX2_four_rounds_compute_last(int start);
918 void sha256_AVX2_one_round_and_sched(
919 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
920 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
921 XMMRegister xmm_2, /* ymm6 */
922 XMMRegister xmm_3, /* ymm7 */
923     Register reg_a,      /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
924 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
925 Register reg_c, /* edi */
926 Register reg_d, /* esi */
927 Register reg_e, /* r8d */
928 Register reg_f, /* r9d */
929 Register reg_g, /* r10d */
930 Register reg_h, /* r11d */
931 int iter);
932
933 void addm(int disp, Register r1, Register r2);
934 void gfmul(XMMRegister tmp0, XMMRegister t);
935 void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
936 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
937 void generateHtbl_one_block(Register htbl);
938 void generateHtbl_eight_blocks(Register htbl);
939 public:
940 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
941 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
942 Register buf, Register state, Register ofs, Register limit, Register rsp,
943 bool multi_block, XMMRegister shuf_mask);
944 void avx_ghash(Register state, Register htbl, Register data, Register blocks);
945 #endif
946
947 #ifdef _LP64
948 private:
949 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
950 Register e, Register f, Register g, Register h, int iteration);
951
952 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
953 Register a, Register b, Register c, Register d, Register e, Register f,
954 Register g, Register h, int iteration);
955
956 void addmq(int disp, Register r1, Register r2);
957 public:
958 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
959 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
960 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
961 XMMRegister shuf_mask);
962 private:
963 void roundEnc(XMMRegister key, int rnum);
964 void lastroundEnc(XMMRegister key, int rnum);
965 void roundDec(XMMRegister key, int rnum);
966 void lastroundDec(XMMRegister key, int rnum);
967 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
968 void ev_add128(XMMRegister xmmdst, XMMRegister xmmsrc1, XMMRegister xmmsrc2,
969 int vector_len, KRegister ktmp, Register rscratch = noreg);
970
971 public:
972 void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
973 void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
974 void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
975 Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
976
977 #endif
978
979 void fast_md5(Register buf, Address state, Address ofs, Address limit,
980 bool multi_block);
981
982 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
983 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
984 Register buf, Register state, Register ofs, Register limit, Register rsp,
985 bool multi_block);
986
987 #ifdef _LP64
988 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
989 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
990 Register buf, Register state, Register ofs, Register limit, Register rsp,
991 bool multi_block, XMMRegister shuf_mask);
992 #else
993 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
994 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
995 Register buf, Register state, Register ofs, Register limit, Register rsp,
996 bool multi_block);
997 #endif
998
999 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1000 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1001 Register rax, Register rcx, Register rdx, Register tmp);
1002
1003 #ifdef _LP64
1004 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1005 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1006 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
1007
1008 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1009 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1010 Register rax, Register rcx, Register rdx, Register r11);
1011
1012 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1013 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1014 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
1015
1016 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1017 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1018 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
1019 Register tmp3, Register tmp4);
1020
1021 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1022 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1023 Register rax, Register rcx, Register rdx, Register tmp1,
1024 Register tmp2, Register tmp3, Register tmp4);
1025 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1026 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1027 Register rax, Register rcx, Register rdx, Register tmp1,
1028 Register tmp2, Register tmp3, Register tmp4);
1029 #else
1030 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1031 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1032 Register rax, Register rcx, Register rdx, Register tmp1);
1033
1034 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1035 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1036 Register rax, Register rcx, Register rdx, Register tmp);
1037
1038 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1039 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1040 Register rdx, Register tmp);
1041
1042 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1043 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1044 Register rax, Register rbx, Register rdx);
1045
1046 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1047 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1048 Register rax, Register rcx, Register rdx, Register tmp);
1049
1050 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1051 Register edx, Register ebx, Register esi, Register edi,
1052 Register ebp, Register esp);
1053
1054 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1055 Register esi, Register edi, Register ebp, Register esp);
1056
1057 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1058 Register edx, Register ebx, Register esi, Register edi,
1059 Register ebp, Register esp);
1060
1061 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1062 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1063 Register rax, Register rcx, Register rdx, Register tmp);
1064 #endif
1065
1066 private:
1067
1068 // these are private because users should be doing movflt/movdbl
1069
1070 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1071 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1072 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1073 void movss(XMMRegister dst, AddressLiteral src);
1074
1075   void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
1076 void movlpd(XMMRegister dst, AddressLiteral src);
1077
1078 public:
1079
1080 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1081 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1082 void addsd(XMMRegister dst, AddressLiteral src);
1083
1084 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1085 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1086 void addss(XMMRegister dst, AddressLiteral src);
1087
1088 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1089 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1090 void addpd(XMMRegister dst, AddressLiteral src);
1091
1092 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1093 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1094 void divsd(XMMRegister dst, AddressLiteral src);
1095
1096 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1097 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1098 void divss(XMMRegister dst, AddressLiteral src);
1099
1100 // Move Unaligned Double Quadword
1101 void movdqu(Address dst, XMMRegister src);
1102 void movdqu(XMMRegister dst, Address src);
1103 void movdqu(XMMRegister dst, XMMRegister src);
1104 void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1105
1106 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1107 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1108 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1109 void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1110 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1111 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1112
1113 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1114 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1115 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1116 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1117 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1118 void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1119
1120   // Safe move operation, lowers to 16-bit moves for targets supporting the
1121   // AVX512F feature and to 64-bit moves for targets supporting the AVX512BW feature.
1122 void kmov(Address dst, KRegister src);
1123 void kmov(KRegister dst, Address src);
1124 void kmov(KRegister dst, KRegister src);
1125 void kmov(Register dst, KRegister src);
1126 void kmov(KRegister dst, Register src);
1127
1128 // AVX Unaligned forms
1129 void vmovdqu(Address dst, XMMRegister src);
1130 void vmovdqu(XMMRegister dst, Address src);
1131 void vmovdqu(XMMRegister dst, XMMRegister src);
1132 void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1133
1134 // AVX512 Unaligned
1135 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
1136 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);
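
  // Presumably dispatches on 'type' (e.g. T_BYTE, T_SHORT, T_INT, T_LONG and their
  // unsigned/floating-point counterparts) to the element-sized evmovdqu[b|w|l|q]
  // forms below; a sketch of the intent, not a normative description.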
1137
1138 void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1139 void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1140 void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1141 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1142 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1143 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1144
1145 void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1146 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1147 void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1148 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1149 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1150
1151 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1152 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1153 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1154 if (dst->encoding() == src->encoding()) return;
1155 Assembler::evmovdqul(dst, src, vector_len);
1156 }
1157 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1158 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1159 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1160 if (dst->encoding() == src->encoding() && mask == k0) return;
1161 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1162 }
1163 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1164
1165 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1166 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1167 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1168 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1169 if (dst->encoding() == src->encoding()) return;
1170 Assembler::evmovdquq(dst, src, vector_len);
1171 }
1172 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1173 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1174 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1175 if (dst->encoding() == src->encoding() && mask == k0) return;
1176 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1177 }
1178 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1179
1180 // Move Aligned Double Quadword
1181 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1182 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1183 void movdqa(XMMRegister dst, AddressLiteral src);
1184
1185 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1186 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1187 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1188 void movsd(XMMRegister dst, AddressLiteral src);
1189
1190 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1191 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1192 void mulpd(XMMRegister dst, AddressLiteral src);
1193
1194 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1195 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1196 void mulsd(XMMRegister dst, AddressLiteral src);
1197
1198 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1199 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1200 void mulss(XMMRegister dst, AddressLiteral src);
1201
1202 // Carry-Less Multiplication Quadword
1203 void pclmulldq(XMMRegister dst, XMMRegister src) {
1204 // 0x00 - multiply lower 64 bits [0:63]
1205 Assembler::pclmulqdq(dst, src, 0x00);
1206 }
1207 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1208 // 0x11 - multiply upper 64 bits [64:127]
1209 Assembler::pclmulqdq(dst, src, 0x11);
1210 }
1211
1212 void pcmpeqb(XMMRegister dst, XMMRegister src);
1213 void pcmpeqw(XMMRegister dst, XMMRegister src);
1214
1215 void pcmpestri(XMMRegister dst, Address src, int imm8);
1216 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1217
1218 void pmovzxbw(XMMRegister dst, XMMRegister src);
1219 void pmovzxbw(XMMRegister dst, Address src);
1220
1221 void pmovmskb(Register dst, XMMRegister src);
1222
1223 void ptest(XMMRegister dst, XMMRegister src);
1224
1225 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
1226 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
1227 void sqrtsd(XMMRegister dst, AddressLiteral src);
1228
1229 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1230 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1231 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
1232
1233 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1234 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1235 void sqrtss(XMMRegister dst, AddressLiteral src);
1236
1237 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1238 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1239 void subsd(XMMRegister dst, AddressLiteral src);
1240
1241 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1242 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1243 void subss(XMMRegister dst, AddressLiteral src);
1244
1245 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1246 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1247 void ucomiss(XMMRegister dst, AddressLiteral src);
1248
1249 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1250 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1251 void ucomisd(XMMRegister dst, AddressLiteral src);
1252
1253 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1254 void xorpd(XMMRegister dst, XMMRegister src);
1255 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1256 void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1257
1258 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1259 void xorps(XMMRegister dst, XMMRegister src);
1260 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1261 void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1262
1263 // Shuffle Bytes
1264 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1265 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1266 void pshufb(XMMRegister dst, AddressLiteral src);
1267 // AVX 3-operand instructions
1268
1269 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1270 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1271 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1272
1273 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1274 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1275 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1276
1277 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1278 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1279
1280 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1281 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1282 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1283
1284 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1285 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1286
1287 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1288 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1289 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1290
1291 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1292 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1293 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1294
1295 void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1296 void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1297
1298 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1299
1300 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1301 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1302
1303 // Vector compares
1304 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1305 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1306 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1307 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1308 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1309 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1310 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1311 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1312 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1313 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1314 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1315 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1316 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1317 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1318 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1319 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1320
1321
1322 // Emit comparison instruction for the specified comparison predicate.
1323 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg);
1324 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1325
1326 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1327 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1328
1329 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1330
1331 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1332 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1333 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1334 Assembler::vpmulld(dst, nds, src, vector_len);
1335 }
1336 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1337 Assembler::vpmulld(dst, nds, src, vector_len);
1338 }
1339 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1340
1341 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1342 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1343
1344 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1345 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1346
1347 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1348 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1349
1350 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1351 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1352
1353 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1354 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1355
1356 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1357 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1358
1359 void vptest(XMMRegister dst, XMMRegister src);
1360 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1361
1362 void punpcklbw(XMMRegister dst, XMMRegister src);
1363 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1364
1365 void pshufd(XMMRegister dst, Address src, int mode);
1366 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1367
1368 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1369 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1370
1371 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1372 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1373 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1374
1375 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1376 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1377 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1378
1379 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1380
1381 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1382 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1383 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1384
1385 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1386 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1387 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1388
1389 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1390 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1391 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1392
1393 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1394 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1395 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1396
1397 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1398 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1399 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1400
1401 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1402 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1403 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1404
1405 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1406 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1407
1408 // AVX Vector instructions
1409
1410 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1411 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1412 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1413
1414 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1415 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1416 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1417
1418 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1419 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1420 Assembler::vpxor(dst, nds, src, vector_len);
1421 else
1422 Assembler::vxorpd(dst, nds, src, vector_len);
1423 }
1424 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1425 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
1426 Assembler::vpxor(dst, nds, src, vector_len);
1427 else
1428 Assembler::vxorpd(dst, nds, src, vector_len);
1429 }
1430 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
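// Note (summary, hedged): for a pure bitwise XOR the integer form (vpxor) and the
// FP-domain form (vxorpd) produce bit-identical results, so on AVX1-only hardware the
// 256-bit case above quietly falls back to the FP encoding; at most a bypass-domain
// latency differs, never the value. Illustrative call (assumed caller, not from this file):
//   vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);   // zero all 256 bits of ymm0 on AVX1 or AVX2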
1431
1432 // Simple version for AVX2 256bit vectors
1433 void vpxor(XMMRegister dst, XMMRegister src) {
1434 assert(UseAVX >= 2, "Should be at least AVX2");
1435 Assembler::vpxor(dst, dst, src, AVX_256bit);
1436 }
1437 void vpxor(XMMRegister dst, Address src) {
1438 assert(UseAVX >= 2, "Should be at least AVX2");
1439 Assembler::vpxor(dst, dst, src, AVX_256bit);
1440 }
1441
1442 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1443 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1444
1445 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1446 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1447 Assembler::vinserti32x4(dst, nds, src, imm8);
1448 } else if (UseAVX > 1) {
1449 // vinserti128 is available only in AVX2
1450 Assembler::vinserti128(dst, nds, src, imm8);
1451 } else {
1452 Assembler::vinsertf128(dst, nds, src, imm8);
1453 }
1454 }
1455
1456 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1457 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1458 Assembler::vinserti32x4(dst, nds, src, imm8);
1459 } else if (UseAVX > 1) {
1460 // vinserti128 is available only in AVX2
1461 Assembler::vinserti128(dst, nds, src, imm8);
1462 } else {
1463 Assembler::vinsertf128(dst, nds, src, imm8);
1464 }
1465 }
1466
1467 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1468 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1469 Assembler::vextracti32x4(dst, src, imm8);
1470 } else if (UseAVX > 1) {
1471 // vextracti128 is available only in AVX2
1472 Assembler::vextracti128(dst, src, imm8);
1473 } else {
1474 Assembler::vextractf128(dst, src, imm8);
1475 }
1476 }
1477
1478 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1479 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1480 Assembler::vextracti32x4(dst, src, imm8);
1481 } else if (UseAVX > 1) {
1482 // vextracti128 is available only in AVX2
1483 Assembler::vextracti128(dst, src, imm8);
1484 } else {
1485 Assembler::vextractf128(dst, src, imm8);
1486 }
1487 }
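// Illustrative use of the feature-dispatching wrappers above (assumed caller, not code
// from this file); the same call works whether the hardware provides AVX-512 without VL,
// AVX2, or only AVX1, because the wrapper picks a legal encoding that moves the same
// 128 bits:
//   vinserti128(xmm0, xmm0, xmm1, 1);   // ymm0[255:128] <- xmm1, ymm0[127:0] kept from nds (xmm0)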
1488
1489 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1490 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1491 vinserti128(dst, dst, src, 1);
1492 }
1493 void vinserti128_high(XMMRegister dst, Address src) {
1494 vinserti128(dst, dst, src, 1);
1495 }
1496 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1497 vextracti128(dst, src, 1);
1498 }
1499 void vextracti128_high(Address dst, XMMRegister src) {
1500 vextracti128(dst, src, 1);
1501 }
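// Typical use (a hedged sketch of a 256-bit -> 128-bit reduction step; assumed caller,
// not code from this file):
//   vextracti128_high(xmm1, xmm2);                      // xmm1 <- upper 128 bits of ymm2
//   vpaddd(xmm2, xmm2, xmm1, Assembler::AVX_128bit);    // fold upper half into lower half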
1502
1503 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1504 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1505 Assembler::vinsertf32x4(dst, dst, src, 1);
1506 } else {
1507 Assembler::vinsertf128(dst, dst, src, 1);
1508 }
1509 }
1510
1511 void vinsertf128_high(XMMRegister dst, Address src) {
1512 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1513 Assembler::vinsertf32x4(dst, dst, src, 1);
1514 } else {
1515 Assembler::vinsertf128(dst, dst, src, 1);
1516 }
1517 }
1518
1519 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1520 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1521 Assembler::vextractf32x4(dst, src, 1);
1522 } else {
1523 Assembler::vextractf128(dst, src, 1);
1524 }
1525 }
1526
1527 void vextractf128_high(Address dst, XMMRegister src) {
1528 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1529 Assembler::vextractf32x4(dst, src, 1);
1530 } else {
1531 Assembler::vextractf128(dst, src, 1);
1532 }
1533 }
1534
1535 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1536 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1537 Assembler::vinserti64x4(dst, dst, src, 1);
1538 }
1539 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1540 Assembler::vinsertf64x4(dst, dst, src, 1);
1541 }
1542 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1543 Assembler::vextracti64x4(dst, src, 1);
1544 }
1545 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1546 Assembler::vextractf64x4(dst, src, 1);
1547 }
1548 void vextractf64x4_high(Address dst, XMMRegister src) {
1549 Assembler::vextractf64x4(dst, src, 1);
1550 }
1551 void vinsertf64x4_high(XMMRegister dst, Address src) {
1552 Assembler::vinsertf64x4(dst, dst, src, 1);
1553 }
1554
1555 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1556 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1557 vinserti128(dst, dst, src, 0);
1558 }
1559 void vinserti128_low(XMMRegister dst, Address src) {
1560 vinserti128(dst, dst, src, 0);
1561 }
1562 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1563 vextracti128(dst, src, 0);
1564 }
1565 void vextracti128_low(Address dst, XMMRegister src) {
1566 vextracti128(dst, src, 0);
1567 }
1568
1569 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1570 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1571 Assembler::vinsertf32x4(dst, dst, src, 0);
1572 } else {
1573 Assembler::vinsertf128(dst, dst, src, 0);
1574 }
1575 }
1576
1577 void vinsertf128_low(XMMRegister dst, Address src) {
1578 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1579 Assembler::vinsertf32x4(dst, dst, src, 0);
1580 } else {
1581 Assembler::vinsertf128(dst, dst, src, 0);
1582 }
1583 }
1584
1585 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1586 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1587 Assembler::vextractf32x4(dst, src, 0);
1588 } else {
1589 Assembler::vextractf128(dst, src, 0);
1590 }
1591 }
1592
1593 void vextractf128_low(Address dst, XMMRegister src) {
1594 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1595 Assembler::vextractf32x4(dst, src, 0);
1596 } else {
1597 Assembler::vextractf128(dst, src, 0);
1598 }
1599 }
1600
1601 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1602 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1603 Assembler::vinserti64x4(dst, dst, src, 0);
1604 }
1605 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1606 Assembler::vinsertf64x4(dst, dst, src, 0);
1607 }
1608 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1609 Assembler::vextracti64x4(dst, src, 0);
1610 }
1611 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1612 Assembler::vextractf64x4(dst, src, 0);
1613 }
1614 void vextractf64x4_low(Address dst, XMMRegister src) {
1615 Assembler::vextractf64x4(dst, src, 0);
1616 }
1617 void vinsertf64x4_low(XMMRegister dst, Address src) {
1618 Assembler::vinsertf64x4(dst, dst, src, 0);
1619 }
1620
1621 // Carry-Less Multiplication Quadword
1622 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1623 // 0x00 - multiply lower 64 bits [0:63]
1624 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1625 }
1626 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1627 // 0x11 - multiply upper 64 bits [64:127]
1628 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1629 }
1630 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1631 // 0x10 - multiply nds[0:63] and src[64:127]
1632 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1633 }
1634 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1635 // 0x01 - multiply nds[64:127] and src[0:63]
1636 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1637 }
1638
1639 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1640 // 0x00 - multiply lower 64 bits [0:63]
1641 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1642 }
1643 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1644 // 0x11 - multiply upper 64 bits [64:127]
1645 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1646 }
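// How the four quadrant helpers above combine (informal sketch, not from the original
// comments): for 128-bit carry-less operands A = a1:a0 and B = b1:b0,
//   A clmul B = (a1 clmul b1) << 128  ^  ((a1 clmul b0) ^ (a0 clmul b1)) << 64  ^  (a0 clmul b0)
// vpclmulhdq yields the high quadrant, vpclmulldq the low quadrant, and
// vpclmullqhqdq/vpclmulhqlqdq supply the two cross terms; this is the kind of building
// block used by the CRC32 folding routines declared further down.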
1647
1648 // Data
1649
1650 void cmov32( Condition cc, Register dst, Address src);
1651 void cmov32( Condition cc, Register dst, Register src);
1652
1653 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1654
1655 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1656 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1657
1658 void movoop(Register dst, jobject obj);
1659 void movoop(Address dst, jobject obj);
1660
1661 void mov_metadata(Register dst, Metadata* obj);
1662 void mov_metadata(Address dst, Metadata* obj);
1663
1664 void movptr(ArrayAddress dst, Register src);
1665 // can this do an lea?
1666 void movptr(Register dst, ArrayAddress src);
1667
1668 void movptr(Register dst, Address src);
1669
1670 #ifdef _LP64
1671 void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1672 #else
1673 void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1674 #endif
1675
1676 void movptr(Register dst, intptr_t src);
1677 void movptr(Register dst, Register src);
1678 void movptr(Address dst, intptr_t src);
1679
1680 void movptr(Address dst, Register src);
1681
1682 void movptr(Register dst, RegisterOrConstant src) {
1683 if (src.is_constant()) movptr(dst, src.as_constant());
1684 else movptr(dst, src.as_register());
1685 }
1686
1687 #ifdef _LP64
1688 // Generally the next two are only used for moving NULL, although there are
1689 // situations in initializing the mark word where they could be used.
1690 // They are dangerous.
1691
1692 // They exist only on LP64, where int32_t and intptr_t are distinct types;
1693 // on 32-bit these declarations would be ambiguous with the intptr_t overloads.
1694
1695 void movptr(Address dst, int32_t imm32);
1696 void movptr(Register dst, int32_t imm32);
1697 #endif // _LP64
1698
1699 // to avoid hiding movl
1700 void mov32(AddressLiteral dst, Register src);
1701 void mov32(Register dst, AddressLiteral src);
1702
1703 // to avoid hiding movb
1704 void movbyte(ArrayAddress dst, int src);
1705
1706 // Import other mov() methods from the parent class, or else
1707 // they will be hidden by the following overloads.
1708 using Assembler::movdl;
1709 using Assembler::movq;
1710 void movdl(XMMRegister dst, AddressLiteral src);
1711 void movq(XMMRegister dst, AddressLiteral src);
1712
1713 // Can push value or effective address
1714 void pushptr(AddressLiteral src);
1715
1716 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1717 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1718
1719 void pushoop(jobject obj);
1720 void pushklass(Metadata* obj);
1721
1722 // sign extend an int (l) to a ptr sized element as needed
1723 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1724 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1725
1726
1727 public:
1728 // C2 compiled method's prolog code.
1729 void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);
1730
1731 // clear memory of size 'cnt' qwords, starting at 'base';
1732 // if 'is_large' is set, do not try to produce a short loop
1733 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1734
1735 // clear memory of a compile-time constant size of 'cnt' qwords, starting at 'base';
1736 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1737
1738 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1739 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1740
1741 // Fill primitive arrays
1742 void generate_fill(BasicType t, bool aligned,
1743 Register to, Register value, Register count,
1744 Register rtmp, XMMRegister xtmp);
1745
1746 void encode_iso_array(Register src, Register dst, Register len,
1747 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1748 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1749
1750 #ifdef _LP64
1751 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1752 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1753 Register y, Register y_idx, Register z,
1754 Register carry, Register product,
1755 Register idx, Register kdx);
1756 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1757 Register yz_idx, Register idx,
1758 Register carry, Register product, int offset);
1759 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1760 Register carry, Register carry2,
1761 Register idx, Register jdx,
1762 Register yz_idx1, Register yz_idx2,
1763 Register tmp, Register tmp3, Register tmp4);
1764 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1765 Register yz_idx, Register idx, Register jdx,
1766 Register carry, Register product,
1767 Register carry2);
1768 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1769 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1770 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1771 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1772 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1773 Register tmp2);
1774 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1775 Register rdxReg, Register raxReg);
1776 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1777 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1778 Register tmp3, Register tmp4);
1779 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1780 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1781
1782 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1783 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1784 Register raxReg);
1785 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1786 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1787 Register raxReg);
1788 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1789 Register result, Register tmp1, Register tmp2,
1790 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1791 #endif
1792
1793 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1794 void update_byte_crc32(Register crc, Register val, Register table);
1795 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1796
1797
1798 #ifdef _LP64
1799 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1800 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1801 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1802 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1803 void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale);
1804 #endif // _LP64
1805
1806 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1807 // Note on a naming convention:
1808 // Prefix w = register only used on a Westmere+ architecture
1809 // Prefix n = register only used on a Nehalem architecture
1810 #ifdef _LP64
1811 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1812 Register tmp1, Register tmp2, Register tmp3);
1813 #else
1814 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1815 Register tmp1, Register tmp2, Register tmp3,
1816 XMMRegister xtmp1, XMMRegister xtmp2);
1817 #endif
1818 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1819 Register in_out,
1820 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1821 XMMRegister w_xtmp2,
1822 Register tmp1,
1823 Register n_tmp2, Register n_tmp3);
1824 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1825 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1826 Register tmp1, Register tmp2,
1827 Register n_tmp3);
1828 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1829 Register in_out1, Register in_out2, Register in_out3,
1830 Register tmp1, Register tmp2, Register tmp3,
1831 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1832 Register tmp4, Register tmp5,
1833 Register n_tmp6);
1834 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1835 Register tmp1, Register tmp2, Register tmp3,
1836 Register tmp4, Register tmp5, Register tmp6,
1837 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1838 bool is_pclmulqdq_supported);
1839 // Fold 128-bit data chunk
1840 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1841 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1842 #ifdef _LP64
1843 // Fold 512-bit data chunk
1844 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1845 #endif // _LP64
1846 // Fold 8-bit data
1847 void fold_8bit_crc32(Register crc, Register table, Register tmp);
1848 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1849
1850 // Compress char[] array to byte[].
1851 void char_array_compress(Register src, Register dst, Register len,
1852 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1853 XMMRegister tmp4, Register tmp5, Register result,
1854 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1855
1856 // Inflate byte[] array to char[].
1857 void byte_array_inflate(Register src, Register dst, Register len,
1858 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
1859
1860 void fill64_masked_avx(uint shift, Register dst, int disp,
1861 XMMRegister xmm, KRegister mask, Register length,
1862 Register temp, bool use64byteVector = false);
1863
1864 void fill32_masked_avx(uint shift, Register dst, int disp,
1865 XMMRegister xmm, KRegister mask, Register length,
1866 Register temp);
1867
1868 void fill32(Address dst, XMMRegister xmm);
1869
1870 void fill32_avx(Register dst, int disp, XMMRegister xmm);
1871
1872 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
1873
1874 void fill64_avx(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
1875
1876 #ifdef _LP64
1877 void convert_f2i(Register dst, XMMRegister src);
1878 void convert_d2i(Register dst, XMMRegister src);
1879 void convert_f2l(Register dst, XMMRegister src);
1880 void convert_d2l(Register dst, XMMRegister src);
1881
1882 void cache_wb(Address line);
1883 void cache_wbsync(bool is_pre);
1884
1885 #if COMPILER2_OR_JVMCI
1886 void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
1887 Register to, Register count, int shift,
1888 Register index, Register temp,
1889 bool use64byteVector, Label& L_entry, Label& L_exit);
1890
1891 void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
1892 Register to, Register start_index, Register end_index,
1893 Register count, int shift, Register temp,
1894 bool use64byteVector, Label& L_entry, Label& L_exit);
1895
1896 void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
1897 KRegister mask, Register length, Register index,
1898 Register temp, int shift = Address::times_1, int offset = 0,
1899 bool use64byteVector = false);
1900
1901 void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
1902 KRegister mask, Register length, Register index,
1903 Register temp, int shift = Address::times_1, int offset = 0);
1904
1905 void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
1906 int shift = Address::times_1, int offset = 0);
1907
1908 void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
1909 bool conjoint, int shift = Address::times_1, int offset = 0,
1910 bool use64byteVector = false);
1911 #endif // COMPILER2_OR_JVMCI
1912
1913 #endif // _LP64
1914
1915 void vallones(XMMRegister dst, int vector_len);
1916 };
1917
1918 /**
1919 * class SkipIfEqual:
1920 *
1921 * Instantiating this class emits assembly code that will jump around any code
1922 * emitted between the creation of the instance and its automatic destruction at
1923 * the end of a scope block, depending on the value of the flag passed to the
1924 * constructor, which is checked at run time.
1925 */
1926 class SkipIfEqual {
1927 private:
1928 MacroAssembler* _masm;
1929 Label _label;
1930
1931 public:
1932 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1933 ~SkipIfEqual();
1934 };
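// Minimal usage sketch (illustrative; 'masm' and the flag name are assumptions, not
// taken from this file):
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // ... code emitted here is jumped over at run time whenever SomeBoolFlag == false ...
//   }   // ~SkipIfEqual() binds the branch target; execution continues here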
1935
1936 #endif // CPU_X86_MACROASSEMBLER_X86_HPP