/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1; // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int number_of_arguments     // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
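  //
  // As an illustrative sketch only (the real sequence is emitted in
  // macroAssembler_x86.cpp; register and argument details below are assumptions),
  // a call through call_VM_base amounts to roughly:
  //
  //   set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch);
  //   ... pass the thread and the Java arguments ...
  //   call(RuntimeAddress(entry_point));
  //   reset_last_Java_frame(java_thread, true);
  //   if (check_exceptions) { /* forward any pending exception */ }
  //   if (oop_result != noreg) get_vm_result(oop_result, java_thread);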
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
           (op == 0x8D) /* lea */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use separate tmp XMM register because caller may
    // require src XMM register to be unchanged (as in x86.ad).
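    // Note: imm8 0x04 (bit 2 set) selects the rounding mode from MXCSR.RC rather
    // than an immediate rounding mode; the low 16 bits of tmp receive the FP16 result.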
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
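  //
  // A typical (hypothetical) call site looks like:
  //
  //   call_VM(rax,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),
  //           rarg);
  //
  // where InterpreterRuntime::some_entry and rarg are placeholders for an ENTRY-style
  // runtime routine and its argument register; the call_VM_leaf variants are reserved
  // for LEAF entry points that neither safepoint nor throw.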

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

#ifdef _LP64
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);
#endif

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
#ifdef _LP64
  void load_narrow_klass_compact(Register dst, Register src);
#endif
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the Klass pointer of an object to a given Klass (which might be narrow,
  // depending on UseCompressedClassPointers).
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                              // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  //   CF (corresponds to C0) if x < y
  //   PF (corresponds to C2) if unordered
  //   ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified.
  // If pop_right is specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - we save/restore FPU state as a whole always
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,      // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label& slow_case            // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

#ifdef _LP64
  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);
#endif

  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);

#ifdef _LP64
  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);
#endif

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()              { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.
  // Also, clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval.
  // This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
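  // (Illustrative, hypothetical use for bumping a statistics counter; the counter
  //  symbol and scratch register here are placeholders:
  //    atomic_incl(ExternalAddress((address)&some_counter), rscratch1);)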
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class.
  // This jump will transfer to the address contained in the location
  // described by entry (not the address of entry).
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src)        { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
  void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs,
                           Register limit, bool multi_block);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src) { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcasti128;
  void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register dst, KRegister src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(Address dst, KRegister src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src)  { Assembler::kmovql(dst, src); }
  void kmovql(Register dst, KRegister src)  { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src)   { Assembler::kmovql(dst, src); }
  void kmovql(Address dst, KRegister src)   { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation: lowers to 16-bit mask moves on targets that only support the
  // AVX512F feature, and to 64-bit mask moves on targets that also support AVX512BW.
  void kmov(Address dst, KRegister src);
  void kmov(KRegister dst, Address src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register dst, KRegister src);
  void kmov(KRegister dst, Register src);
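
  // A plausible lowering for the kmov() overloads above, consistent with the comment
  // (the actual implementation lives in macroAssembler_x86.cpp; this is a sketch only):
  //
  //   if (VM_Version::supports_avx512bw()) {
  //     kmovql(dst, src);       // full 64-bit mask register
  //   } else {
  //     assert(VM_Version::supports_evex(), "requires at least AVX512F");
  //     kmovwl(dst, src);       // low 16 bits of the mask register
  //   }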

  using Assembler::movddup;
  void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
  void vmovdqu(XMMRegister dst, Address src, int vector_len);
  void vmovdqu(Address dst, XMMRegister src, int vector_len);

  // AVX Aligned forms
  using Assembler::vmovdqa;
  void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);

  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
  void evmovdqub(XMMRegister dst, Address src, int vector_len)     { Assembler::evmovdqub(dst, src, vector_len); }

  void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqub(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(Address dst, XMMRegister src, int vector_len)     { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, int vector_len)     { Assembler::evmovdquw(dst, src, vector_len); }

  void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquw(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdqul(dst, src, vector_len);
    }
  }
  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }

  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastd;
  void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastq;
  void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector compares
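  // The 'comparison' immediate below follows the EVEX VPCMP predicate encoding
  // (0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT/GE, 6 = NLE/GT); cf. the ComparisonPredicate
  // argument taken by vpcmpCCW() further down.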
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);

  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

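  // Masked vector shifts. When 'is_varshift' is true the per-element variable-count
  // forms (vpsllv*/vpsrlv*/vpsrav*) are selected instead of the uniform-count forms.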
  using Assembler::evpsllw;
  void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
    }
  }

  using Assembler::evpsrlq;
  void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsraw;
  void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsrad;
  void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsraq;
  void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
    }
  }

  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }

  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
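
  // Illustrative use of the *_high helpers above (sketch only; the register choices are
  // hypothetical): folding the upper 128-bit lane of a 256-bit integer vector onto the
  // lower lane, as done in horizontal reductions:
  //
  //   __ vextracti128_high(xmm1, xmm0);                    // xmm1 = upper half of ymm0
  //   __ vpaddd(xmm0, xmm0, xmm1, Assembler::AVX_128bit);  // add the two halves lane-wise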

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }
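
  // For the carry-less multiply helpers above, the pclmulqdq/vpclmulqdq immediate selects the
  // source quadwords: bit 0 picks the low (0) or high (1) qword of the first operand and bit 4
  // does the same for the second operand, hence the 0x00, 0x11, 0x10 and 0x01 selectors above.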

  // AVX-512 mask operations.
  void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
  void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
  void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void kortest(uint masklen, KRegister src1, KRegister src2);
  void ktest(uint masklen, KRegister src1, KRegister src2);

  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);

  using Assembler::evpandq;
  void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::evpaddq;
  void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::evporq;
  void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpshufb;
  void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpor;
  void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpternlogq;
  void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);

  void cmov32(Condition cc, Register dst, Address src);
  void cmov32(Condition cc, Register dst, Register src);

  void cmov(Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src)  { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj, Register rscratch);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj, Register rscratch);

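  // Pointer-sized moves: these operate on 64-bit values on LP64 and on 32-bit values
  // otherwise, analogous to the LP64_ONLY/NOT_LP64 dispatch used by cmovptr, pushptr
  // and popptr nearby.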
  void movptr(Register dst, Register src);
  void movptr(Register dst, Address src);
  void movptr(Register dst, AddressLiteral src);
  void movptr(Register dst, ArrayAddress src);
  void movptr(Register dst, intptr_t src);
  void movptr(Address dst, Register src);
  void movptr(Address dst, int32_t imm);
  void movptr(Address dst, intptr_t src, Register rscratch);
  void movptr(ArrayAddress dst, Register src, Register rscratch);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

  // to avoid hiding movl
  void mov32(Register dst, AddressLiteral src);
  void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::movq;
  void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Can push value or effective address
  void pushptr(AddressLiteral src, Register rscratch);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj, Register rscratch);
  void pushklass(Metadata* obj, Register rscratch);

  // sign-extend a 32-bit value ('l') to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

public:
  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask = knoreg);

  // clear memory initialization sequence for a constant size
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
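  // For reference, the scalar per-byte update that the helpers below accelerate is the
  // standard table-driven, reflected CRC-32 (polynomial 0xEDB88320) used by
  // java.util.zip.CRC32 (sketch only, not emitted literally):
  //
  //   crc = table[(crc ^ byte) & 0xFF] ^ (crc >> 8);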
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on the naming convention:
  //   Prefix w = register only used on a Westmere+ architecture
  //   Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2,
                       bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2,
                         bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI
#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);

  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

  void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);

#ifdef _LP64
  void save_legacy_gprs();
  void restore_legacy_gprs();
  void setcc(Assembler::Condition comparison, Register dst);
#endif
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP