1 /*
2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
26 #define CPU_X86_MACROASSEMBLER_X86_HPP
27
28 #include "asm/assembler.hpp"
29 #include "code/vmreg.inline.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "utilities/macros.hpp"
32 #include "runtime/rtmLocking.hpp"
33 #include "runtime/vm_version.hpp"
34
35 // MacroAssembler extends Assembler by frequently used macros.
36 //
37 // Instructions for which a 'better' code sequence exists depending
38 // on arguments should also go in here.
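//
// Illustrative sketch (not part of the build): generator code typically allocates a
// MacroAssembler over a CodeBuffer and emits through it, conventionally via a local
// "__" shorthand for the assembler pointer. The buffer setup and register choices below
// are placeholder examples, not a definitive recipe.
//
//   // given a CodeBuffer 'buffer' prepared by the caller
//   MacroAssembler* masm = new MacroAssembler(&buffer);
//   #define __ masm->
//   __ enter();                          // build a frame
//   __ movptr(rax, Address(rsi, 0));     // prefer the macro forms over raw Assembler ops
//   __ leave();
//   __ ret(0);
//   #undef __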
39
40 class MacroAssembler: public Assembler {
41 friend class LIR_Assembler;
42 friend class Runtime1; // as_Address()
43
44 public:
45 // Support for VM calls
46 //
47 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
48 // may customize this version by overriding it for its purposes (e.g., to save/restore
49 // additional registers when doing a VM call).
50
51 virtual void call_VM_leaf_base(
52 address entry_point, // the entry point
53 int number_of_arguments // the number of arguments to pop after the call
54 );
55
56 protected:
57 // This is the base routine called by the different versions of call_VM. The interpreter
58 // may customize this version by overriding it for its purposes (e.g., to save/restore
59 // additional registers when doing a VM call).
60 //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
65 virtual void call_VM_base( // returns the register containing the thread upon return
66 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
67 Register java_thread, // the thread if computed before ; use noreg otherwise
68 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
69 address entry_point, // the entry point
70 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
71 bool check_exceptions // whether to check for pending exceptions after return
72 );
73
74 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
75
76 // helpers for FPU flag access
77 // tmp is a temporary register, if none is available use noreg
78 void save_rax (Register tmp);
79 void restore_rax(Register tmp);
80
81 public:
82 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
83
84 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
85 // The implementation is only non-empty for the InterpreterMacroAssembler,
86 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
87 virtual void check_and_handle_popframe(Register java_thread);
88 virtual void check_and_handle_earlyret(Register java_thread);
89
90 Address as_Address(AddressLiteral adr);
91 Address as_Address(ArrayAddress adr);
92
93 // Support for NULL-checks
94 //
95 // Generates code that causes a NULL OS exception if the content of reg is NULL.
96 // If the accessed location is M[reg + offset] and the offset is known, provide the
97 // offset. No explicit code generation is needed if the offset is within a certain
98 // range (0 <= offset <= page_size).
99
100 void null_check(Register reg, int offset = -1);
101 static bool needs_explicit_null_check(intptr_t offset);
102 static bool uses_implicit_null_check(void* address);
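  // Illustrative sketch of the intended use (register and offset names are placeholders):
  //   __ null_check(robj, some_field_offset);  // small known offset: typically no code is
  //                                            // emitted; the later access of
  //                                            // M[robj + offset] faults if robj is NULL
  //   __ null_check(robj);                     // offset unknown (-1): an explicit probe of
  //                                            // M[robj] is emitted instead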
103
104 // Required platform-specific helpers for Label::patch_instructions.
105 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
129
130 // The following 4 methods return the offset of the appropriate move instruction
131
132 // Support for fast byte/short loading with zero extension (depending on particular CPU)
133 int load_unsigned_byte(Register dst, Address src);
134 int load_unsigned_short(Register dst, Address src);
135
136 // Support for fast byte/short loading with sign extension (depending on particular CPU)
137 int load_signed_byte(Register dst, Address src);
138 int load_signed_short(Register dst, Address src);
139
140 // Support for sign-extension (hi:lo = extend_sign(lo))
141 void extend_sign(Register hi, Register lo);
142
143 // Load and store values by size and signed-ness
144 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
145 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
146
147 // Support for inc/dec with optimal instruction selection depending on value
148
149 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
150 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
151 void increment(Address dst, int value = 1) { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
152 void decrement(Address dst, int value = 1) { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }
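  // Illustrative sketch (assumed typical expansion; exact instruction selection is an
  // implementation detail):
  //   __ increment(rbx);      // on LP64 expands to incrementq(rbx, 1), e.g. an incq/addq
  //   __ decrement(rcx, 8);    // on LP64 expands to decrementq(rcx, 8), e.g. a single subq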
153
154 void decrementl(Address dst, int value = 1);
155 void decrementl(Register reg, int value = 1);
156
157 void decrementq(Register reg, int value = 1);
158 void decrementq(Address dst, int value = 1);
159
160 void incrementl(Address dst, int value = 1);
161 void incrementl(Register reg, int value = 1);
162
163 void incrementq(Register reg, int value = 1);
164 void incrementq(Address dst, int value = 1);
165
  // Support for optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
172 void movflt(XMMRegister dst, Address src) { movss(dst, src); }
173 void movflt(XMMRegister dst, AddressLiteral src);
174 void movflt(Address dst, XMMRegister src) { movss(dst, src); }
175
176 // Move with zero extension
177 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }
178
179 void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
181 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
182 else { movsd (dst, src); return; }
183 }
184
185 void movdbl(XMMRegister dst, AddressLiteral src);
186
187 void movdbl(XMMRegister dst, Address src) {
188 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
189 else { movlpd(dst, src); return; }
190 }
191 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
192
193 void incrementl(AddressLiteral dst);
194 void incrementl(ArrayAddress dst);
195
196 void incrementq(AddressLiteral dst);
197
198 // Alignment
199 void align32();
200 void align64();
201 void align(int modulus);
202 void align(int modulus, int target);
203
204 // A 5 byte nop that is safe for patching (see patch_verified_entry)
205 void fat_nop();
206
207 // Stack frame creation/removal
208 void enter();
209 void leave();
210
  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
212 // The pointer will be loaded into the thread register.
213 void get_thread(Register thread);
214
215 #ifdef _LP64
216 // Support for argument shuffling
217
218 void move32_64(VMRegPair src, VMRegPair dst);
219 void long_move(VMRegPair src, VMRegPair dst);
220 void float_move(VMRegPair src, VMRegPair dst);
221 void double_move(VMRegPair src, VMRegPair dst);
222 void move_ptr(VMRegPair src, VMRegPair dst);
223 void object_move(OopMap* map,
224 int oop_handle_offset,
225 int framesize_in_slots,
226 VMRegPair src,
227 VMRegPair dst,
228 bool is_receiver,
229 int* receiver_offset);
230 #endif // _LP64
231
232 // Support for VM calls
233 //
234 // It is imperative that all calls into the VM are handled via the call_VM macros.
235 // They make sure that the stack linkage is setup correctly. call_VM's correspond
236 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
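  // Illustrative sketch of a typical call_VM use from generated code (the runtime entry and
  // argument register below are placeholders, not references to real entries):
  //   __ call_VM(rax,                                            // oop result, if any
  //              CAST_FROM_FN_PTR(address, SomeRuntime::entry),  // VM entry point (placeholder)
  //              rbx);                                           // arg_1
  // This sets up the last Java frame, passes the current thread implicitly, and (by default)
  // checks for a pending exception on return.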
237
238
239 void call_VM(Register oop_result,
240 address entry_point,
241 bool check_exceptions = true);
242 void call_VM(Register oop_result,
243 address entry_point,
244 Register arg_1,
245 bool check_exceptions = true);
246 void call_VM(Register oop_result,
247 address entry_point,
248 Register arg_1, Register arg_2,
249 bool check_exceptions = true);
250 void call_VM(Register oop_result,
251 address entry_point,
252 Register arg_1, Register arg_2, Register arg_3,
253 bool check_exceptions = true);
254
255 // Overloadings with last_Java_sp
256 void call_VM(Register oop_result,
257 Register last_java_sp,
258 address entry_point,
259 int number_of_arguments = 0,
260 bool check_exceptions = true);
261 void call_VM(Register oop_result,
262 Register last_java_sp,
263 address entry_point,
               Register arg_1,
               bool check_exceptions = true);
266 void call_VM(Register oop_result,
267 Register last_java_sp,
268 address entry_point,
269 Register arg_1, Register arg_2,
270 bool check_exceptions = true);
271 void call_VM(Register oop_result,
272 Register last_java_sp,
273 address entry_point,
274 Register arg_1, Register arg_2, Register arg_3,
275 bool check_exceptions = true);
276
277 void get_vm_result (Register oop_result, Register thread);
278 void get_vm_result_2(Register metadata_result, Register thread);
279
280 // These always tightly bind to MacroAssembler::call_VM_base
281 // bypassing the virtual implementation
282 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
283 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
284 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
285 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
286 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
287
288 void call_VM_leaf0(address entry_point);
289 void call_VM_leaf(address entry_point,
290 int number_of_arguments = 0);
291 void call_VM_leaf(address entry_point,
292 Register arg_1);
293 void call_VM_leaf(address entry_point,
294 Register arg_1, Register arg_2);
295 void call_VM_leaf(address entry_point,
296 Register arg_1, Register arg_2, Register arg_3);
297
298 // These always tightly bind to MacroAssembler::call_VM_leaf_base
299 // bypassing the virtual implementation
300 void super_call_VM_leaf(address entry_point);
301 void super_call_VM_leaf(address entry_point, Register arg_1);
302 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
303 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
304 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
305
306 // last Java Frame (fills frame anchor)
307 void set_last_Java_frame(Register thread,
308 Register last_java_sp,
309 Register last_java_fp,
310 address last_java_pc);
311
312 // thread in the default location (r15_thread on 64bit)
313 void set_last_Java_frame(Register last_java_sp,
314 Register last_java_fp,
315 address last_java_pc);
316
317 void reset_last_Java_frame(Register thread, bool clear_fp);
318
319 // thread in the default location (r15_thread on 64bit)
320 void reset_last_Java_frame(bool clear_fp);
321
322 // jobjects
323 void clear_jweak_tag(Register possibly_jweak);
324 void resolve_jobject(Register value, Register thread, Register tmp);
325
326 // C 'boolean' to Java boolean: x == 0 ? 0 : 1
327 void c2bool(Register x);
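  // Illustrative sketch: typically used right after a call into C code that returns a C/C++
  // bool, to normalize the value to a Java boolean (0 or 1); the call target is a placeholder:
  //   __ call(RuntimeAddress(some_c_function));
  //   __ c2bool(rax);          // rax now holds 0 or 1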
328
329 // C++ bool manipulation
330
331 void movbool(Register dst, Address src);
332 void movbool(Address dst, bool boolconst);
333 void movbool(Address dst, Register src);
334 void testbool(Register dst);
335
336 void resolve_oop_handle(Register result, Register tmp = rscratch2);
337 void resolve_weak_handle(Register result, Register tmp);
338 void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
339 void load_method_holder_cld(Register rresult, Register rmethod);
340
341 void load_method_holder(Register holder, Register method);
342
343 // oop manipulations
344 void load_klass(Register dst, Register src, Register tmp, bool null_check_src = false);
345 #ifdef _LP64
346 void load_nklass(Register dst, Register src);
347 #endif
348 void store_klass(Register dst, Register src, Register tmp);
349
350 // Compares the Klass pointer of an object to a given Klass (which might be narrow,
351 // depending on UseCompressedClassPointers).
352 void cmp_klass(Register klass, Register dst, Register tmp);
353
  // Compares the Klass pointers of the two objects src and dst. The result is left in the
  // condition flags. Uses tmp1 and tmp2 as temporary registers.
356 void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2);
357
358 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
359 Register tmp1, Register thread_tmp);
360 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
361 Register tmp1, Register tmp2);
362
363 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
364 Register thread_tmp = noreg, DecoratorSet decorators = 0);
365 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
366 Register thread_tmp = noreg, DecoratorSet decorators = 0);
367 void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
368 Register tmp2 = noreg, DecoratorSet decorators = 0);
369
370 // Used for storing NULL. All other oop constants should be
371 // stored using routines that take a jobject.
372 void store_heap_oop_null(Address dst);
373
374 void load_prototype_header(Register dst, Register src, Register tmp);
375
376 #ifdef _LP64
377 void store_klass_gap(Register dst, Register src);
378
379 // This dummy is to prevent a call to store_heap_oop from
380 // converting a zero (like NULL) into a Register by giving
381 // the compiler two choices it can't resolve
382
383 void store_heap_oop(Address dst, void* dummy);
384
385 void encode_heap_oop(Register r);
386 void decode_heap_oop(Register r);
387 void encode_heap_oop_not_null(Register r);
388 void decode_heap_oop_not_null(Register r);
389 void encode_heap_oop_not_null(Register dst, Register src);
390 void decode_heap_oop_not_null(Register dst, Register src);
391
392 void set_narrow_oop(Register dst, jobject obj);
393 void set_narrow_oop(Address dst, jobject obj);
394 void cmp_narrow_oop(Register dst, jobject obj);
395 void cmp_narrow_oop(Address dst, jobject obj);
396
397 void encode_klass_not_null(Register r, Register tmp);
398 void decode_klass_not_null(Register r, Register tmp);
399 void encode_and_move_klass_not_null(Register dst, Register src);
400 void decode_and_move_klass_not_null(Register dst, Register src);
401 void set_narrow_klass(Register dst, Klass* k);
402 void set_narrow_klass(Address dst, Klass* k);
403 void cmp_narrow_klass(Register dst, Klass* k);
404 void cmp_narrow_klass(Address dst, Klass* k);
405
  // If the heap base register is used, reinitialize it with the correct value.
407 void reinit_heapbase();
408
409 DEBUG_ONLY(void verify_heapbase(const char* msg);)
410
411 #endif // _LP64
412
413 // Int division/remainder for Java
414 // (as idivl, but checks for special case as described in JVM spec.)
415 // returns idivl instruction offset for implicit exception handling
416 int corrected_idivl(Register reg);
417
418 // Long division/remainder for Java
419 // (as idivq, but checks for special case as described in JVM spec.)
420 // returns idivq instruction offset for implicit exception handling
421 int corrected_idivq(Register reg);
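  // (The "special case" above is min_int / -1, resp. min_long / -1: the JVM spec requires the
  //  result to be min_int/min_long with remainder 0, whereas a plain idivl/idivq would raise a
  //  hardware overflow exception, so the generated code tests for that case first.)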
422
423 void int3();
424
425 // Long operation macros for a 32bit cpu
426 // Long negation for Java
427 void lneg(Register hi, Register lo);
428
429 // Long multiplication for Java
430 // (destroys contents of eax, ebx, ecx and edx)
431 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
432
433 // Long shifts for Java
434 // (semantics as described in JVM spec.)
435 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
436 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
437
438 // Long compare for Java
439 // (semantics as described in JVM spec.)
440 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
441
442
443 // misc
444
445 // Sign extension
446 void sign_extend_short(Register reg);
447 void sign_extend_byte(Register reg);
448
449 // Division by power of 2, rounding towards 0
450 void division_with_shift(Register reg, int shift_value);
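  // Sketch of the assumed code shape for reg / (1 << shift_value), rounding toward zero
  // (negative dividends are biased before the arithmetic shift):
  //   if (reg < 0) reg += (1 << shift_value) - 1;
  //   reg >>= shift_value;   // arithmetic shift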
451
452 #ifndef _LP64
453 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
454 //
455 // CF (corresponds to C0) if x < y
456 // PF (corresponds to C2) if unordered
457 // ZF (corresponds to C3) if x = y
458 //
459 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
460 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
461 void fcmp(Register tmp);
462 // Variant of the above which allows y to be further down the stack
463 // and which only pops x and y if specified. If pop_right is
464 // specified then pop_left must also be specified.
465 void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
466
467 // Floating-point comparison for Java
468 // Compares the top-most stack entries on the FPU stack and stores the result in dst.
469 // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
470 // (semantics as described in JVM spec.)
471 void fcmp2int(Register dst, bool unordered_is_less);
472 // Variant of the above which allows y to be further down the stack
473 // and which only pops x and y if specified. If pop_right is
474 // specified then pop_left must also be specified.
475 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
476
477 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
478 // tmp is a temporary register, if none is available use noreg
479 void fremr(Register tmp);
480
481 // only if +VerifyFPU
482 void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64
484
485 // dst = c = a * b + c
486 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
487 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
488
489 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
490 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
491 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
492 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
493
494
495 // same as fcmp2int, but using SSE2
496 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
497 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
498
499 // branch to L if FPU flag C2 is set/not set
500 // tmp is a temporary register, if none is available use noreg
501 void jC2 (Register tmp, Label& L);
502 void jnC2(Register tmp, Label& L);
503
504 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
505 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
506 void load_float(Address src);
507
508 // Store float value to 'address'. If UseSSE >= 1, the value is stored
509 // from register xmm0. Otherwise, the value is stored from the FPU stack.
510 void store_float(Address dst);
511
512 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
513 // register xmm0. Otherwise, the value is loaded onto the FPU stack.
514 void load_double(Address src);
515
516 // Store double value to 'address'. If UseSSE >= 2, the value is stored
517 // from register xmm0. Otherwise, the value is stored from the FPU stack.
518 void store_double(Address dst);
519
520 #ifndef _LP64
521 // Pop ST (ffree & fincstp combined)
522 void fpop();
523
524 void empty_FPU_stack();
525 #endif // !_LP64
526
527 void push_IU_state();
528 void pop_IU_state();
529
530 void push_FPU_state();
531 void pop_FPU_state();
532
533 void push_CPU_state();
534 void pop_CPU_state();
535
  // Round reg up to a multiple of modulus (modulus must be a power of two)
537 void round_to(Register reg, int modulus);
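  // Assumed effect (a sketch): with a power-of-two modulus this is equivalent to
  //   reg = (reg + modulus - 1) & ~(modulus - 1);
  // e.g. round_to(rdx, 16) rounds rdx up to the next multiple of 16.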
538
539 // Callee saved registers handling
540 void push_callee_saved_registers();
541 void pop_callee_saved_registers();
542
543 // allocation
544 void eden_allocate(
545 Register thread, // Current thread
546 Register obj, // result: pointer to object after successful allocation
547 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
548 int con_size_in_bytes, // object size in bytes if known at compile time
549 Register t1, // temp register
550 Label& slow_case // continuation point if fast allocation fails
551 );
552 void tlab_allocate(
553 Register thread, // Current thread
554 Register obj, // result: pointer to object after successful allocation
555 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
556 int con_size_in_bytes, // object size in bytes if known at compile time
557 Register t1, // temp register
558 Register t2, // temp register
559 Label& slow_case // continuation point if fast allocation fails
560 );
561 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
562
563 // interface method calling
564 void lookup_interface_method(Register recv_klass,
565 Register intf_klass,
566 RegisterOrConstant itable_index,
567 Register method_result,
568 Register scan_temp,
569 Label& no_such_interface,
570 bool return_method = true);
571
572 // virtual method calling
573 void lookup_virtual_method(Register recv_klass,
574 RegisterOrConstant vtable_index,
575 Register method_result);
576
577 // Test sub_klass against super_klass, with fast and slow paths.
578
579 // The fast path produces a tri-state answer: yes / no / maybe-slow.
580 // One of the three labels can be NULL, meaning take the fall-through.
581 // If super_check_offset is -1, the value is loaded up from super_klass.
582 // No registers are killed, except temp_reg.
583 void check_klass_subtype_fast_path(Register sub_klass,
584 Register super_klass,
585 Register temp_reg,
586 Label* L_success,
587 Label* L_failure,
588 Label* L_slow_path,
589 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
590
591 // The rest of the type check; must be wired to a corresponding fast path.
592 // It does not repeat the fast path logic, so don't use it standalone.
593 // The temp_reg and temp2_reg can be noreg, if no temps are available.
594 // Updates the sub's secondary super cache as necessary.
595 // If set_cond_codes, condition codes will be Z on success, NZ on failure.
596 void check_klass_subtype_slow_path(Register sub_klass,
597 Register super_klass,
598 Register temp_reg,
599 Register temp2_reg,
600 Label* L_success,
601 Label* L_failure,
602 bool set_cond_codes = false);
603
604 // Simplified, combined version, good for typical uses.
605 // Falls through on failure.
606 void check_klass_subtype(Register sub_klass,
607 Register super_klass,
608 Register temp_reg,
609 Label& L_success);
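  // Illustrative sketch of how the fast and slow paths are typically paired (labels and
  // registers are placeholders):
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(rsub, rsuper, rtmp, &L_success, &L_failure, NULL);
  //   // falling through here means "maybe": finish the decision on the slow path
  //   __ check_klass_subtype_slow_path(rsub, rsuper, rtmp, noreg, &L_success, NULL);
  //   // falling through here means failure
  //   __ bind(L_failure);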
610
611 void clinit_barrier(Register klass,
612 Register thread,
613 Label* L_fast_path = NULL,
614 Label* L_slow_path = NULL);
615
616 // method handles (JSR 292)
617 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
618
619 // Debugging
620
621 // only if +VerifyOops
622 void _verify_oop(Register reg, const char* s, const char* file, int line);
623 void _verify_oop_addr(Address addr, const char* s, const char* file, int line);
624
625 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
626 if (VerifyOops) {
627 _verify_oop(reg, s, file, line);
628 }
629 }
630 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
631 if (VerifyOops) {
632 _verify_oop_addr(reg, s, file, line);
633 }
634 }
635
636 // TODO: verify method and klass metadata (compare against vptr?)
637 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
638 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
639
640 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
641 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
642 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
643 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
644 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
645
646 // Verify or restore cpu control state after JNI call
647 void restore_cpu_control_state_after_jni();
648
649 // prints msg, dumps registers and stops execution
650 void stop(const char* msg);
651
652 // prints msg and continues
653 void warn(const char* msg);
654
655 // dumps registers and other state
656 void print_state();
657
658 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
659 static void debug64(char* msg, int64_t pc, int64_t regs[]);
660 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
661 static void print_state64(int64_t pc, int64_t regs[]);
662
663 void os_breakpoint();
664
665 void untested() { stop("untested"); }
666
667 void unimplemented(const char* what = "");
668
669 void should_not_reach_here() { stop("should not reach here"); }
670
671 void print_CPU_state();
672
673 // Stack overflow checking
674 void bang_stack_with_offset(int offset) {
675 // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
677 movl(Address(rsp, (-offset)), rax);
678 }
679
680 // Writes to stack successive pages until offset reached to check for
681 // stack overflow + shadow pages. Also, clobbers tmp
682 void bang_stack_size(Register size, Register tmp);
683
684 // Check for reserved stack access in method being exited (for JIT)
685 void reserved_stack_check();
686
687 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);
688
689 void verify_tlab();
690
691 // Biased locking support
692 // lock_reg and obj_reg must be loaded up with the appropriate values.
693 // swap_reg must be rax, and is killed.
694 // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
695 // be killed; if not supplied, push/pop will be used internally to
696 // allocate a temporary (inefficient, avoid if possible).
697 // Optional slow case is for implementations (interpreter and C1) which branch to
698 // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
699 void biased_locking_enter(Register lock_reg, Register obj_reg,
700 Register swap_reg, Register tmp_reg,
701 Register tmp_reg2, bool swap_reg_contains_mark,
702 Label& done, Label* slow_case = NULL,
703 BiasedLockingCounters* counters = NULL);
704 void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
705
706 Condition negate_condition(Condition cond);
707
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
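  // Illustrative sketch (the external symbols are placeholders): AddressLiteral lets the same
  // macro handle a far/absolute address on both 32-bit and 64-bit:
  //   __ cmp32(rax, ExternalAddress((address) &some_global_flag));
  //   __ lea(rscratch1, ExternalAddress((address) some_table));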
712
713 // Arithmetics
714
715
716 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
717 void addptr(Address dst, Register src);
718
719 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
720 void addptr(Register dst, int32_t src);
721 void addptr(Register dst, Register src);
722 void addptr(Register dst, RegisterOrConstant src) {
723 if (src.is_constant()) addptr(dst, (int) src.as_constant());
724 else addptr(dst, src.as_register());
725 }
726
727 void andptr(Register dst, int32_t src);
728 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
729
730 void cmp8(AddressLiteral src1, int imm);
731
732 // renamed to drag out the casting of address to int32_t/intptr_t
733 void cmp32(Register src1, int32_t imm);
734
735 void cmp32(AddressLiteral src1, int32_t imm);
736 // compare reg - mem, or reg - &mem
737 void cmp32(Register src1, AddressLiteral src2);
738
739 void cmp32(Register src1, Address src2);
740
741 #ifndef _LP64
742 void cmpklass(Address dst, Metadata* obj);
743 void cmpklass(Register dst, Metadata* obj);
744 void cmpoop(Address dst, jobject obj);
#endif // !_LP64
746
747 void cmpoop(Register src1, Register src2);
748 void cmpoop(Register src1, Address src2);
749 void cmpoop(Register dst, jobject obj);
750
  // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
752 void cmpptr(Address src1, AddressLiteral src2);
753
754 void cmpptr(Register src1, AddressLiteral src2);
755
756 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
757 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
758 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
759
760 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
761 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
762
  // cmp64 to avoid hiding cmpq
764 void cmp64(Register src1, AddressLiteral src);
765
766 void cmpxchgptr(Register reg, Address adr);
767
768 void locked_cmpxchgptr(Register reg, AddressLiteral adr);
769
770
771 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
772 void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
773
774
775 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
776
777 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
778
779 void shlptr(Register dst, int32_t shift);
780 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
781
782 void shrptr(Register dst, int32_t shift);
783 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
784
785 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
786 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
787
788 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
789
790 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
791 void subptr(Register dst, int32_t src);
  // Force generation of a 4-byte immediate value even if it fits into 8 bits
793 void subptr_imm32(Register dst, int32_t src);
794 void subptr(Register dst, Register src);
795 void subptr(Register dst, RegisterOrConstant src) {
796 if (src.is_constant()) subptr(dst, (int) src.as_constant());
797 else subptr(dst, src.as_register());
798 }
799
800 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
801 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
802
803 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
804 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
805
806 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
807
808
809
810 // Helper functions for statistics gathering.
811 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
812 void cond_inc32(Condition cond, AddressLiteral counter_addr);
813 // Unconditional atomic increment.
814 void atomic_incl(Address counter_addr);
815 void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
816 #ifdef _LP64
817 void atomic_incq(Address counter_addr);
818 void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
819 #endif
820 void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
821 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
822
823 void lea(Register dst, AddressLiteral adr);
824 void lea(Address dst, AddressLiteral adr);
825 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
826
827 void leal32(Register dst, Address src) { leal(dst, src); }
828
829 // Import other testl() methods from the parent class or else
830 // they will be hidden by the following overriding declaration.
831 using Assembler::testl;
832 void testl(Register dst, AddressLiteral src);
833
834 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
835 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
836 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
837 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
838
839 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
840 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
841 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
842 void testptr(Register src1, Register src2);
843
844 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
845 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
846
847 // Calls
848
849 void call(Label& L, relocInfo::relocType rtype);
850 void call(Register entry);
851 void call(Address addr) { Assembler::call(addr); }
852
  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because it is more natural
  // for jumps/calls.
856 void call(AddressLiteral entry);
857
858 // Emit the CompiledIC call idiom
859 void ic_call(address entry, jint method_index = 0);
860
861 // Jumps
862
  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because it is more natural
  // for jumps/calls.
866 void jump(AddressLiteral dst);
867 void jump_cc(Condition cc, AddressLiteral dst);
868
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump transfers to the address
  // contained in the location described by entry (not the address of entry).
872 void jump(ArrayAddress entry);
873
874 // Floating
875
876 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
877 void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
878 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
879
880 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
881 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
882 void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
883
884 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
885 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
886 void comiss(XMMRegister dst, AddressLiteral src);
887
888 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
889 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
890 void comisd(XMMRegister dst, AddressLiteral src);
891
892 #ifndef _LP64
893 void fadd_s(Address src) { Assembler::fadd_s(src); }
894 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
895
896 void fldcw(Address src) { Assembler::fldcw(src); }
897 void fldcw(AddressLiteral src);
898
899 void fld_s(int index) { Assembler::fld_s(index); }
900 void fld_s(Address src) { Assembler::fld_s(src); }
901 void fld_s(AddressLiteral src);
902
903 void fld_d(Address src) { Assembler::fld_d(src); }
904 void fld_d(AddressLiteral src);
905
906 void fmul_s(Address src) { Assembler::fmul_s(src); }
907 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64
909
910 void fld_x(Address src) { Assembler::fld_x(src); }
911 void fld_x(AddressLiteral src);
912
913 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
914 void ldmxcsr(AddressLiteral src);
915
916 #ifdef _LP64
917 private:
918 void sha256_AVX2_one_round_compute(
919 Register reg_old_h,
920 Register reg_a,
921 Register reg_b,
922 Register reg_c,
923 Register reg_d,
924 Register reg_e,
925 Register reg_f,
926 Register reg_g,
927 Register reg_h,
928 int iter);
929 void sha256_AVX2_four_rounds_compute_first(int start);
930 void sha256_AVX2_four_rounds_compute_last(int start);
931 void sha256_AVX2_one_round_and_sched(
932 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
933 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
934 XMMRegister xmm_2, /* ymm6 */
935 XMMRegister xmm_3, /* ymm7 */
936 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
937 Register reg_b, /* ebx */ /* full cycle is 8 iterations */
938 Register reg_c, /* edi */
939 Register reg_d, /* esi */
940 Register reg_e, /* r8d */
941 Register reg_f, /* r9d */
942 Register reg_g, /* r10d */
943 Register reg_h, /* r11d */
944 int iter);
945
946 void addm(int disp, Register r1, Register r2);
947 void gfmul(XMMRegister tmp0, XMMRegister t);
948 void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
949 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
950 void generateHtbl_one_block(Register htbl);
951 void generateHtbl_eight_blocks(Register htbl);
952 public:
953 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
954 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
955 Register buf, Register state, Register ofs, Register limit, Register rsp,
956 bool multi_block, XMMRegister shuf_mask);
957 void avx_ghash(Register state, Register htbl, Register data, Register blocks);
958 #endif
959
960 #ifdef _LP64
961 private:
962 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
963 Register e, Register f, Register g, Register h, int iteration);
964
965 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
966 Register a, Register b, Register c, Register d, Register e, Register f,
967 Register g, Register h, int iteration);
968
969 void addmq(int disp, Register r1, Register r2);
970 public:
971 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
972 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
973 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
974 XMMRegister shuf_mask);
975 private:
976 void roundEnc(XMMRegister key, int rnum);
977 void lastroundEnc(XMMRegister key, int rnum);
978 void roundDec(XMMRegister key, int rnum);
979 void lastroundDec(XMMRegister key, int rnum);
980 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
981 void ev_add128(XMMRegister xmmdst, XMMRegister xmmsrc1, XMMRegister xmmsrc2,
982 int vector_len, KRegister ktmp, Register rscratch = noreg);
983
984 public:
985 void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
986 void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
987 void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
988 Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
989
990 #endif
991
992 void fast_md5(Register buf, Address state, Address ofs, Address limit,
993 bool multi_block);
994
995 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
996 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
997 Register buf, Register state, Register ofs, Register limit, Register rsp,
998 bool multi_block);
999
1000 #ifdef _LP64
1001 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1002 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1003 Register buf, Register state, Register ofs, Register limit, Register rsp,
1004 bool multi_block, XMMRegister shuf_mask);
1005 #else
1006 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
1007 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
1008 Register buf, Register state, Register ofs, Register limit, Register rsp,
1009 bool multi_block);
1010 #endif
1011
1012 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1013 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1014 Register rax, Register rcx, Register rdx, Register tmp);
1015
1016 #ifdef _LP64
1017 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1018 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1019 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
1020
1021 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1022 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1023 Register rax, Register rcx, Register rdx, Register r11);
1024
1025 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1026 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1027 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);
1028
1029 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1030 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1031 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
1032 Register tmp3, Register tmp4);
1033
1034 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1035 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1036 Register rax, Register rcx, Register rdx, Register tmp1,
1037 Register tmp2, Register tmp3, Register tmp4);
1038 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1039 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1040 Register rax, Register rcx, Register rdx, Register tmp1,
1041 Register tmp2, Register tmp3, Register tmp4);
1042 #else
1043 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1044 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1045 Register rax, Register rcx, Register rdx, Register tmp1);
1046
1047 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1048 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1049 Register rax, Register rcx, Register rdx, Register tmp);
1050
1051 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
1052 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
1053 Register rdx, Register tmp);
1054
1055 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1056 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1057 Register rax, Register rbx, Register rdx);
1058
1059 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1060 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1061 Register rax, Register rcx, Register rdx, Register tmp);
1062
1063 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1064 Register edx, Register ebx, Register esi, Register edi,
1065 Register ebp, Register esp);
1066
1067 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
1068 Register esi, Register edi, Register ebp, Register esp);
1069
1070 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
1071 Register edx, Register ebx, Register esi, Register edi,
1072 Register ebp, Register esp);
1073
1074 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
1075 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
1076 Register rax, Register rcx, Register rdx, Register tmp);
1077 #endif
1078
1079 private:
1080
1081 // these are private because users should be doing movflt/movdbl
1082
1083 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
1084 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
1085 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
1086 void movss(XMMRegister dst, AddressLiteral src);
1087
1088 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
1089 void movlpd(XMMRegister dst, AddressLiteral src);
1090
1091 public:
1092
1093 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
1094 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
1095 void addsd(XMMRegister dst, AddressLiteral src);
1096
1097 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
1098 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
1099 void addss(XMMRegister dst, AddressLiteral src);
1100
1101 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
1102 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
1103 void addpd(XMMRegister dst, AddressLiteral src);
1104
1105 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
1106 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
1107 void divsd(XMMRegister dst, AddressLiteral src);
1108
1109 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
1110 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
1111 void divss(XMMRegister dst, AddressLiteral src);
1112
1113 // Move Unaligned Double Quadword
1114 void movdqu(Address dst, XMMRegister src);
1115 void movdqu(XMMRegister dst, Address src);
1116 void movdqu(XMMRegister dst, XMMRegister src);
1117 void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
1118
1119 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); }
1120 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); }
1121 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); }
1122 void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1123 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); }
1124 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
1125
1126 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
1127 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); }
1128 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); }
1129 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); }
1130 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); }
1131 void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1132
  // Safe move operation: lowers to 16-bit moves for targets supporting the
  // AVX512F feature and to 64-bit moves for targets supporting the AVX512BW feature.
1135 void kmov(Address dst, KRegister src);
1136 void kmov(KRegister dst, Address src);
1137 void kmov(KRegister dst, KRegister src);
1138 void kmov(Register dst, KRegister src);
1139 void kmov(KRegister dst, Register src);
1140
1141 // AVX Unaligned forms
1142 void vmovdqu(Address dst, XMMRegister src);
1143 void vmovdqu(XMMRegister dst, Address src);
1144 void vmovdqu(XMMRegister dst, XMMRegister src);
1145 void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1146
1147 // AVX512 Unaligned
1148 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
1149 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);
1150
1151 void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1152 void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1153 void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
1154 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1155 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
1156 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1157
1158 void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1159 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1160 void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
1161 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
1162 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1163
1164 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1165 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
1166 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
1167 if (dst->encoding() == src->encoding()) return;
1168 Assembler::evmovdqul(dst, src, vector_len);
1169 }
1170 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1171 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
1172 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1173 if (dst->encoding() == src->encoding() && mask == k0) return;
1174 Assembler::evmovdqul(dst, mask, src, merge, vector_len);
1175 }
1176 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1177
1178 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1179 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
1180 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
1181 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
1182 if (dst->encoding() == src->encoding()) return;
1183 Assembler::evmovdquq(dst, src, vector_len);
1184 }
1185 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1186 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
1187 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
1188 if (dst->encoding() == src->encoding() && mask == k0) return;
1189 Assembler::evmovdquq(dst, mask, src, merge, vector_len);
1190 }
1191 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1192
1193 // Move Aligned Double Quadword
1194 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
1195 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
1196 void movdqa(XMMRegister dst, AddressLiteral src);
1197
1198 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
1199 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
1200 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
1201 void movsd(XMMRegister dst, AddressLiteral src);
1202
1203 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
1204 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
1205 void mulpd(XMMRegister dst, AddressLiteral src);
1206
1207 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
1208 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
1209 void mulsd(XMMRegister dst, AddressLiteral src);
1210
1211 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
1212 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
1213 void mulss(XMMRegister dst, AddressLiteral src);
1214
1215 // Carry-Less Multiplication Quadword
1216 void pclmulldq(XMMRegister dst, XMMRegister src) {
1217 // 0x00 - multiply lower 64 bits [0:63]
1218 Assembler::pclmulqdq(dst, src, 0x00);
1219 }
1220 void pclmulhdq(XMMRegister dst, XMMRegister src) {
1221 // 0x11 - multiply upper 64 bits [64:127]
1222 Assembler::pclmulqdq(dst, src, 0x11);
1223 }
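
// Illustrative note (not part of the original source): the pclmulqdq immediate
// selects which 64-bit halves are multiplied; bit 0 picks the half of the first
// (destination) operand and bit 4 picks the half of the second operand, so the
// two wrappers above expand to:
//
//   masm->pclmulldq(xmm0, xmm1);   // xmm0[63:0]   * xmm1[63:0]    (imm8 = 0x00)
//   masm->pclmulhdq(xmm0, xmm1);   // xmm0[127:64] * xmm1[127:64]  (imm8 = 0x11)
//
// 'masm' is an assumed MacroAssembler* local used only for illustration.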
1224
1225 void pcmpeqb(XMMRegister dst, XMMRegister src);
1226 void pcmpeqw(XMMRegister dst, XMMRegister src);
1227
1228 void pcmpestri(XMMRegister dst, Address src, int imm8);
1229 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);
1230
1231 void pmovzxbw(XMMRegister dst, XMMRegister src);
1232 void pmovzxbw(XMMRegister dst, Address src);
1233
1234 void pmovmskb(Register dst, XMMRegister src);
1235
1236 void ptest(XMMRegister dst, XMMRegister src);
1237
1238 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
1239 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
1240 void sqrtsd(XMMRegister dst, AddressLiteral src);
1241
1242 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1243 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
1244 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
1245
1246 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
1247 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
1248 void sqrtss(XMMRegister dst, AddressLiteral src);
1249
1250 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
1251 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
1252 void subsd(XMMRegister dst, AddressLiteral src);
1253
1254 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
1255 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
1256 void subss(XMMRegister dst, AddressLiteral src);
1257
1258 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
1259 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
1260 void ucomiss(XMMRegister dst, AddressLiteral src);
1261
1262 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
1263 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
1264 void ucomisd(XMMRegister dst, AddressLiteral src);
1265
1266 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
1267 void xorpd(XMMRegister dst, XMMRegister src);
1268 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
1269 void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1270
1271 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
1272 void xorps(XMMRegister dst, XMMRegister src);
1273 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
1274 void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
1275
1276 // Shuffle Bytes
1277 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
1278 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
1279 void pshufb(XMMRegister dst, AddressLiteral src);
1280 // AVX 3-operands instructions
1281
1282 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
1283 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
1284 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1285
1286 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
1287 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
1288 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1289
1290 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1291 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
1292
1293 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1294 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1295 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1296
1297 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1298 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1299
1300 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1301 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
1302 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);
1303
1304 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1305 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
1306 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1307
1308 void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
1309 void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
1310
1311 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1312
1313 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1314 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1315
1316 // Vector compares
1317 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1318 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1319 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1320 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1321 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1322 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1323 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1324 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1325 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1326 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1327 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1328 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1329 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
1330 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); }
1331 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
1332 int comparison, bool is_signed, int vector_len, Register scratch_reg);
1333
1334
1335 // Emit comparison instruction for the specified comparison predicate.
1336 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg);
1337 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);
1338
1339 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1340 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }
1341
1342 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);
1343
1344 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1345 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1346 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1347 Assembler::vpmulld(dst, nds, src, vector_len);
1348 }
1349 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1350 Assembler::vpmulld(dst, nds, src, vector_len);
1351 }
1352 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1353
1354 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1355 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1356
1357 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1358 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1359
1360 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1361 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1362
1363 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1364 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1365
1366 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1367 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1368
1369 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
1370 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
1371
1372 void vptest(XMMRegister dst, XMMRegister src);
1373 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }
1374
1375 void punpcklbw(XMMRegister dst, XMMRegister src);
1376 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
1377
1378 void pshufd(XMMRegister dst, Address src, int mode);
1379 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
1380
1381 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1382 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
1383
1384 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1385 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
1386 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1387
1388 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1389 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
1390 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1391
1392 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
1393
1394 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
1395 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
1396 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1397
1398 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
1399 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
1400 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1401
1402 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
1403 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
1404 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1405
1406 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
1407 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
1408 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1409
1410 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
1411 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
1412 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1413
1414 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
1415 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
1416 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1417
1418 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1419 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
1420
1421 // AVX Vector instructions
1422
1423 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1424 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
1425 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1426
1427 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1428 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
1429 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1430
1431 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1432 if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; the 128-bit form is already available with AVX
1433 Assembler::vpxor(dst, nds, src, vector_len);
1434 else
1435 Assembler::vxorpd(dst, nds, src, vector_len);
1436 }
1437 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
1438 if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor requires AVX2; the 128-bit form is already available with AVX
1439 Assembler::vpxor(dst, nds, src, vector_len);
1440 else
1441 Assembler::vxorpd(dst, nds, src, vector_len);
1442 }
1443 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
1444
1445 // Simple version for AVX2 256bit vectors
1446 void vpxor(XMMRegister dst, XMMRegister src) {
1447 assert(UseAVX >= 2, "Should be at least AVX2");
1448 Assembler::vpxor(dst, dst, src, AVX_256bit);
1449 }
1450 void vpxor(XMMRegister dst, Address src) {
1451 assert(UseAVX >= 2, "Should be at least AVX2");
1452 Assembler::vpxor(dst, dst, src, AVX_256bit);
1453 }
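
// Illustrative usage (a sketch, not part of the original source): zeroing a
// 256-bit register through the AVX-level-aware wrapper, where 'masm' is an
// assumed MacroAssembler* local:
//
//   masm->vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);
//
// On AVX2 hardware this emits the integer vpxor; with AVX1 only, 256-bit integer
// instructions are unavailable, so the wrapper falls back to vxorpd, which
// produces the same bitwise result and differs only in execution domain.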
1454
1455 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
1456 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);
1457
1458 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
1459 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1460 Assembler::vinserti32x4(dst, nds, src, imm8);
1461 } else if (UseAVX > 1) {
1462 // vinserti128 is available only in AVX2
1463 Assembler::vinserti128(dst, nds, src, imm8);
1464 } else {
1465 Assembler::vinsertf128(dst, nds, src, imm8);
1466 }
1467 }
1468
1469 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
1470 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1471 Assembler::vinserti32x4(dst, nds, src, imm8);
1472 } else if (UseAVX > 1) {
1473 // vinserti128 is available only in AVX2
1474 Assembler::vinserti128(dst, nds, src, imm8);
1475 } else {
1476 Assembler::vinsertf128(dst, nds, src, imm8);
1477 }
1478 }
1479
1480 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1481 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1482 Assembler::vextracti32x4(dst, src, imm8);
1483 } else if (UseAVX > 1) {
1484 // vextracti128 is available only in AVX2
1485 Assembler::vextracti128(dst, src, imm8);
1486 } else {
1487 Assembler::vextractf128(dst, src, imm8);
1488 }
1489 }
1490
1491 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
1492 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1493 Assembler::vextracti32x4(dst, src, imm8);
1494 } else if (UseAVX > 1) {
1495 // vextracti128 is available only in AVX2
1496 Assembler::vextracti128(dst, src, imm8);
1497 } else {
1498 Assembler::vextractf128(dst, src, imm8);
1499 }
1500 }
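
// Illustrative note (a sketch, not part of the original source): the wrappers
// above select an encoding from the CPU features detected at startup. For
// example, with 'masm' an assumed MacroAssembler* local,
//
//   masm->vextracti128(xmm1, xmm2, 1);   // copy bits 255:128 of ymm2 into xmm1
//
// emits vextracti32x4 on AVX-512 parts without VL, vextracti128 on AVX2, and the
// floating-point vextractf128 on AVX1-only hardware; the bits moved are the same
// in every case.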
1501
1502 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
1503 void vinserti128_high(XMMRegister dst, XMMRegister src) {
1504 vinserti128(dst, dst, src, 1);
1505 }
1506 void vinserti128_high(XMMRegister dst, Address src) {
1507 vinserti128(dst, dst, src, 1);
1508 }
1509 void vextracti128_high(XMMRegister dst, XMMRegister src) {
1510 vextracti128(dst, src, 1);
1511 }
1512 void vextracti128_high(Address dst, XMMRegister src) {
1513 vextracti128(dst, src, 1);
1514 }
1515
1516 void vinsertf128_high(XMMRegister dst, XMMRegister src) {
1517 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1518 Assembler::vinsertf32x4(dst, dst, src, 1);
1519 } else {
1520 Assembler::vinsertf128(dst, dst, src, 1);
1521 }
1522 }
1523
1524 void vinsertf128_high(XMMRegister dst, Address src) {
1525 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1526 Assembler::vinsertf32x4(dst, dst, src, 1);
1527 } else {
1528 Assembler::vinsertf128(dst, dst, src, 1);
1529 }
1530 }
1531
1532 void vextractf128_high(XMMRegister dst, XMMRegister src) {
1533 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1534 Assembler::vextractf32x4(dst, src, 1);
1535 } else {
1536 Assembler::vextractf128(dst, src, 1);
1537 }
1538 }
1539
1540 void vextractf128_high(Address dst, XMMRegister src) {
1541 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1542 Assembler::vextractf32x4(dst, src, 1);
1543 } else {
1544 Assembler::vextractf128(dst, src, 1);
1545 }
1546 }
1547
1548 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
1549 void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
1550 Assembler::vinserti64x4(dst, dst, src, 1);
1551 }
1552 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
1553 Assembler::vinsertf64x4(dst, dst, src, 1);
1554 }
1555 void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
1556 Assembler::vextracti64x4(dst, src, 1);
1557 }
1558 void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
1559 Assembler::vextractf64x4(dst, src, 1);
1560 }
1561 void vextractf64x4_high(Address dst, XMMRegister src) {
1562 Assembler::vextractf64x4(dst, src, 1);
1563 }
1564 void vinsertf64x4_high(XMMRegister dst, Address src) {
1565 Assembler::vinsertf64x4(dst, dst, src, 1);
1566 }
1567
1568 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
1569 void vinserti128_low(XMMRegister dst, XMMRegister src) {
1570 vinserti128(dst, dst, src, 0);
1571 }
1572 void vinserti128_low(XMMRegister dst, Address src) {
1573 vinserti128(dst, dst, src, 0);
1574 }
1575 void vextracti128_low(XMMRegister dst, XMMRegister src) {
1576 vextracti128(dst, src, 0);
1577 }
1578 void vextracti128_low(Address dst, XMMRegister src) {
1579 vextracti128(dst, src, 0);
1580 }
1581
1582 void vinsertf128_low(XMMRegister dst, XMMRegister src) {
1583 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1584 Assembler::vinsertf32x4(dst, dst, src, 0);
1585 } else {
1586 Assembler::vinsertf128(dst, dst, src, 0);
1587 }
1588 }
1589
1590 void vinsertf128_low(XMMRegister dst, Address src) {
1591 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1592 Assembler::vinsertf32x4(dst, dst, src, 0);
1593 } else {
1594 Assembler::vinsertf128(dst, dst, src, 0);
1595 }
1596 }
1597
1598 void vextractf128_low(XMMRegister dst, XMMRegister src) {
1599 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1600 Assembler::vextractf32x4(dst, src, 0);
1601 } else {
1602 Assembler::vextractf128(dst, src, 0);
1603 }
1604 }
1605
1606 void vextractf128_low(Address dst, XMMRegister src) {
1607 if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
1608 Assembler::vextractf32x4(dst, src, 0);
1609 } else {
1610 Assembler::vextractf128(dst, src, 0);
1611 }
1612 }
1613
1614 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
1615 void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
1616 Assembler::vinserti64x4(dst, dst, src, 0);
1617 }
1618 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
1619 Assembler::vinsertf64x4(dst, dst, src, 0);
1620 }
1621 void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
1622 Assembler::vextracti64x4(dst, src, 0);
1623 }
1624 void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
1625 Assembler::vextractf64x4(dst, src, 0);
1626 }
1627 void vextractf64x4_low(Address dst, XMMRegister src) {
1628 Assembler::vextractf64x4(dst, src, 0);
1629 }
1630 void vinsertf64x4_low(XMMRegister dst, Address src) {
1631 Assembler::vinsertf64x4(dst, dst, src, 0);
1632 }
1633
1634 // Carry-Less Multiplication Quadword
1635 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1636 // 0x00 - multiply lower 64 bits [0:63]
1637 Assembler::vpclmulqdq(dst, nds, src, 0x00);
1638 }
1639 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1640 // 0x11 - multiply upper 64 bits [64:127]
1641 Assembler::vpclmulqdq(dst, nds, src, 0x11);
1642 }
1643 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1644 // 0x10 - multiply nds[0:63] and src[64:127]
1645 Assembler::vpclmulqdq(dst, nds, src, 0x10);
1646 }
1647 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
1648 // 0x01 - multiply nds[64:127] and src[0:63]
1649 Assembler::vpclmulqdq(dst, nds, src, 0x01);
1650 }
1651
1652 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1653 // 0x00 - multiply lower 64 bits [0:63]
1654 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
1655 }
1656 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
1657 // 0x11 - multiply upper 64 bits [64:127]
1658 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
1659 }
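
// Illustrative note (a sketch, not part of the original source): a full
// 128 x 128 -> 255-bit carry-less product combines all four selector variants;
// with A = a1:a0 in xmm1 and B = b1:b0 in xmm2 the result is
// a1*b1 << 128  ^  (a1*b0 ^ a0*b1) << 64  ^  a0*b0, which maps onto the wrappers as
//
//   masm->vpclmulldq(xmm3, xmm1, xmm2);     // a0 * b0
//   masm->vpclmulhdq(xmm4, xmm1, xmm2);     // a1 * b1
//   masm->vpclmullqhqdq(xmm5, xmm1, xmm2);  // a0 * b1
//   masm->vpclmulhqlqdq(xmm6, xmm1, xmm2);  // a1 * b0
//
// 'masm' is an assumed MacroAssembler* local used only for illustration.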
1660
1661 // Data
1662
1663 void cmov32( Condition cc, Register dst, Address src);
1664 void cmov32( Condition cc, Register dst, Register src);
1665
1666 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
1667
1668 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1669 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
1670
1671 void movoop(Register dst, jobject obj);
1672 void movoop(Address dst, jobject obj);
1673
1674 void mov_metadata(Register dst, Metadata* obj);
1675 void mov_metadata(Address dst, Metadata* obj);
1676
1677 void movptr(ArrayAddress dst, Register src);
1678 // can this do an lea?
1679 void movptr(Register dst, ArrayAddress src);
1680
1681 void movptr(Register dst, Address src);
1682
1683 #ifdef _LP64
1684 void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
1685 #else
1686 void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
1687 #endif
1688
1689 void movptr(Register dst, intptr_t src);
1690 void movptr(Register dst, Register src);
1691 void movptr(Address dst, intptr_t src);
1692
1693 void movptr(Address dst, Register src);
1694
1695 void movptr(Register dst, RegisterOrConstant src) {
1696 if (src.is_constant()) movptr(dst, src.as_constant());
1697 else movptr(dst, src.as_register());
1698 }
1699
1700 #ifdef _LP64
1701 // Generally the next two are only used for moving NULL, although there are
1702 // situations when initializing the mark word where they could be used.
1703 // They are dangerous.
1704
1705 // They exist only on LP64, where int32_t and intptr_t are distinct types;
1706 // on 32-bit the declarations would be ambiguous with the intptr_t overloads.
1707
1708 void movptr(Address dst, int32_t imm32);
1709 void movptr(Register dst, int32_t imm32);
1710 #endif // _LP64
1711
1712 // to avoid hiding movl
1713 void mov32(AddressLiteral dst, Register src);
1714 void mov32(Register dst, AddressLiteral src);
1715
1716 // to avoid hiding movb
1717 void movbyte(ArrayAddress dst, int src);
1718
1719 // Import other mov() methods from the parent class or else
1720 // they will be hidden by the following overriding declaration.
1721 using Assembler::movdl;
1722 using Assembler::movq;
1723 void movdl(XMMRegister dst, AddressLiteral src);
1724 void movq(XMMRegister dst, AddressLiteral src);
1725
1726 // Can push value or effective address
1727 void pushptr(AddressLiteral src);
1728
1729 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1730 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1731
1732 void pushoop(jobject obj);
1733 void pushklass(Metadata* obj);
1734
1735 // sign-extend an 'l' (32-bit) value to a pointer-sized element as needed
1736 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1737 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1738
1739
1740 public:
1741 // C2 compiled method's prolog code.
1742 void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);
1743
1744 // clear memory of size 'cnt' qwords, starting at 'base';
1745 // if 'is_large' is set, do not try to produce a short loop
1746 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
1747
1748 // clear memory initialization sequence for a constant size 'cnt' (in qwords), starting at 'base'
1749 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
1750
1751 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1752 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
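
// Illustrative usage (a sketch, not part of the original source; the register
// choices are assumptions): clearing 'cnt' qwords starting at 'base' from a stub,
// with 'masm' an assumed MacroAssembler* local:
//
//   masm->clear_mem(rdi /*base*/, rcx /*cnt*/, rax /*rtmp*/, xmm0 /*xtmp*/, /*is_large*/ false);
//
// As noted above, the variable-count form may emit a short loop for small counts
// unless 'is_large' requests the large-block sequence directly.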
1753
1754 // Fill primitive arrays
1755 void generate_fill(BasicType t, bool aligned,
1756 Register to, Register value, Register count,
1757 Register rtmp, XMMRegister xtmp);
1758
1759 void encode_iso_array(Register src, Register dst, Register len,
1760 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1761 XMMRegister tmp4, Register tmp5, Register result, bool ascii);
1762
1763 #ifdef _LP64
1764 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
1765 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
1766 Register y, Register y_idx, Register z,
1767 Register carry, Register product,
1768 Register idx, Register kdx);
1769 void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
1770 Register yz_idx, Register idx,
1771 Register carry, Register product, int offset);
1772 void multiply_128_x_128_bmi2_loop(Register y, Register z,
1773 Register carry, Register carry2,
1774 Register idx, Register jdx,
1775 Register yz_idx1, Register yz_idx2,
1776 Register tmp, Register tmp3, Register tmp4);
1777 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
1778 Register yz_idx, Register idx, Register jdx,
1779 Register carry, Register product,
1780 Register carry2);
1781 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
1782 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
1783 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
1784 Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1785 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
1786 Register tmp2);
1787 void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
1788 Register rdxReg, Register raxReg);
1789 void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
1790 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1791 Register tmp3, Register tmp4);
1792 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
1793 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
1794
1795 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
1796 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1797 Register raxReg);
1798 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
1799 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
1800 Register raxReg);
1801 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
1802 Register result, Register tmp1, Register tmp2,
1803 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
1804 #endif
1805
1806 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
1807 void update_byte_crc32(Register crc, Register val, Register table);
1808 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1809
1810
1811 #ifdef _LP64
1812 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
1813 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
1814 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
1815 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
1816 void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale);
1817 #endif // _LP64
1818
1819 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
1820 // Note on a naming convention:
1821 // Prefix w = register only used on a Westmere+ architecture
1822 // Prefix n = register only used on a Nehalem architecture
1823 #ifdef _LP64
1824 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1825 Register tmp1, Register tmp2, Register tmp3);
1826 #else
1827 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1828 Register tmp1, Register tmp2, Register tmp3,
1829 XMMRegister xtmp1, XMMRegister xtmp2);
1830 #endif
1831 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1832 Register in_out,
1833 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1834 XMMRegister w_xtmp2,
1835 Register tmp1,
1836 Register n_tmp2, Register n_tmp3);
1837 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1838 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1839 Register tmp1, Register tmp2,
1840 Register n_tmp3);
1841 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1842 Register in_out1, Register in_out2, Register in_out3,
1843 Register tmp1, Register tmp2, Register tmp3,
1844 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1845 Register tmp4, Register tmp5,
1846 Register n_tmp6);
1847 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1848 Register tmp1, Register tmp2, Register tmp3,
1849 Register tmp4, Register tmp5, Register tmp6,
1850 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1851 bool is_pclmulqdq_supported);
1852 // Fold 128-bit data chunk
1853 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1854 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1855 #ifdef _LP64
1856 // Fold 512-bit data chunk
1857 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
1858 #endif // _LP64
1859 // Fold 8-bit data
1860 void fold_8bit_crc32(Register crc, Register table, Register tmp);
1861 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1862
1863 // Compress char[] array to byte[].
1864 void char_array_compress(Register src, Register dst, Register len,
1865 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1866 XMMRegister tmp4, Register tmp5, Register result,
1867 KRegister mask1 = knoreg, KRegister mask2 = knoreg);
1868
1869 // Inflate byte[] array to char[].
1870 void byte_array_inflate(Register src, Register dst, Register len,
1871 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);
1872
1873 void fill64_masked_avx(uint shift, Register dst, int disp,
1874 XMMRegister xmm, KRegister mask, Register length,
1875 Register temp, bool use64byteVector = false);
1876
1877 void fill32_masked_avx(uint shift, Register dst, int disp,
1878 XMMRegister xmm, KRegister mask, Register length,
1879 Register temp);
1880
1881 void fill32(Address dst, XMMRegister xmm);
1882
1883 void fill32_avx(Register dst, int disp, XMMRegister xmm);
1884
1885 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);
1886
1887 void fill64_avx(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);
1888
1889 #ifdef _LP64
1890 void convert_f2i(Register dst, XMMRegister src);
1891 void convert_d2i(Register dst, XMMRegister src);
1892 void convert_f2l(Register dst, XMMRegister src);
1893 void convert_d2l(Register dst, XMMRegister src);
1894
1895 void cache_wb(Address line);
1896 void cache_wbsync(bool is_pre);
1897
1898 #if COMPILER2_OR_JVMCI
1899 void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
1900 Register to, Register count, int shift,
1901 Register index, Register temp,
1902 bool use64byteVector, Label& L_entry, Label& L_exit);
1903
1904 void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
1905 Register to, Register start_index, Register end_index,
1906 Register count, int shift, Register temp,
1907 bool use64byteVector, Label& L_entry, Label& L_exit);
1908
1909 void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
1910 KRegister mask, Register length, Register index,
1911 Register temp, int shift = Address::times_1, int offset = 0,
1912 bool use64byteVector = false);
1913
1914 void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
1915 KRegister mask, Register length, Register index,
1916 Register temp, int shift = Address::times_1, int offset = 0);
1917
1918 void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
1919 int shift = Address::times_1, int offset = 0);
1920
1921 void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
1922 bool conjoint, int shift = Address::times_1, int offset = 0,
1923 bool use64byteVector = false);
1924 #endif // COMPILER2_OR_JVMCI
1925
1926 #endif // _LP64
1927
1928 void vallones(XMMRegister dst, int vector_len);
1929
1930 void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
1931 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
1932 };
1933
1934 /**
1935 * class SkipIfEqual:
1936 *
1937 * Instantiating this class emits assembly code that jumps around any code
1938 * generated between the creation of the instance and its automatic destruction
1939 * at the end of the enclosing scope. Whether the jump is taken depends on the
1940 * run-time value of the flag passed to the constructor.
1941 */
1942 class SkipIfEqual {
1943 private:
1944 MacroAssembler* _masm;
1945 Label _label;
1946
1947 public:
1948 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1949 ~SkipIfEqual();
1950 };
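
// Illustrative usage (a sketch, not part of the original source; the flag name is
// a made-up example): code emitted inside the scope is jumped over at run time
// whenever the flag's value equals the one passed to the constructor, so with
// 'value' == false the guarded code runs only when the flag is true.
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false);
//     // ... emit code here that should run only when SomeDiagnosticFlag is true ...
//   }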
1951
1952 #endif // CPU_X86_MACROASSEMBLER_X86_HPP