/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is
  // specified (noreg), then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }
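
  // Worked example (illustrative): for a 5-byte "jmp rel32" at address B
  // targeting T, the displacement field sits at B+1 and is encoded relative
  // to the end of the instruction, so the code above stores
  //   imm32 = T - (B + 1 + 4)
  // i.e. target minus the address of the first byte after the displacement.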

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
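
  // Note (illustrative): movflt/movdbl pick the cheapest encoding for the
  // current CPU. For example, where UseXmmRegToRegMoveAll is true,
  //   movdbl(xmm0, xmm1);
  // emits "movapd xmm0, xmm1"; otherwise it emits "movsd xmm0, xmm1".
  // Self-moves are elided entirely.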

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
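  //
  // Illustrative use (hypothetical runtime entry and registers): calling a
  // one-argument VM entry that returns an oop into rax, with the default
  // pending-exception check on return:
  //   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry), rbx);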

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
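
  // Illustrative use (hypothetical registers and field offset): load an oop
  // field through the GC barrier machinery, adding a decorator when the value
  // is known to be non-null:
  //   load_heap_oop(rax, Address(rsi, field_offset), rbx, noreg, IS_NOT_NULL);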

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
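
  // Illustrative use (hypothetical registers): Java's fcmpl/fcmpg bytecodes
  // differ only in how an unordered (NaN) comparison is ranked, so
  //   cmpss2int(xmm0, xmm1, rax, /*unordered_is_less=*/true);   // fcmpl: NaN -> -1
  //   cmpss2int(xmm0, xmm1, rax, /*unordered_is_less=*/false);  // fcmpg: NaN -> +1
  // leaves -1, 0 or +1 in rax.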

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - always save/restore FPU state as a whole
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
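
  // Illustrative fast-path use of tlab_allocate (hypothetical registers and
  // size): allocate a fixed-size object from the current TLAB, branching to a
  // slow path that calls into the runtime when the TLAB cannot satisfy the
  // request:
  //   Label slow_case;
  //   tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes, rcx, rdx, slow_case);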

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
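
  // Illustrative wiring of the two-phase check (hypothetical registers); the
  // fast path answers "maybe" by falling through into the slow path:
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure, nullptr);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_success, nullptr);
  //   bind(L_failure);   // the slow path also falls through here on failure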

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to successive stack pages, until the given offset is reached, to
  // check for stack overflow + shadow pages. Also clobbers tmp.
  void bang_stack_size(Register size, Register tmp);
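
  // Worked example (illustrative): bang_stack_with_offset(os::vm_page_size())
  // stores rax at [rsp - page_size]; if that page is an unmapped guard page,
  // the resulting fault is caught by the VM's signal handler and surfaced as
  // a stack overflow.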

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetic

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }


  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
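
  // Illustrative use (hypothetical counter variable): bump a global counter
  // only when the preceding compare set ZF, without disturbing the flags:
  //   cond_inc32(Assembler::zero, ExternalAddress((address)&some_counter));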

  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
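
  // Illustrative use (hypothetical stub address): tail-jump to an external
  // stub when a check fails; RuntimeAddress wraps a raw address as an
  // AddressLiteral:
  //   jump_cc(Assembler::notZero, RuntimeAddress(slow_path_stub_entry));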
This jump will transfer to the address 915 // contained in the location described by entry (not the address of entry) 916 void jump(ArrayAddress entry, Register rscratch); 917 918 // Floating 919 920 void push_f(XMMRegister r); 921 void pop_f(XMMRegister r); 922 void push_d(XMMRegister r); 923 void pop_d(XMMRegister r); 924 925 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } 926 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } 927 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 928 929 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } 930 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } 931 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 932 933 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } 934 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } 935 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 936 937 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } 938 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 939 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 940 941 #ifndef _LP64 942 void fadd_s(Address src) { Assembler::fadd_s(src); } 943 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 944 945 void fldcw(Address src) { Assembler::fldcw(src); } 946 void fldcw(AddressLiteral src); 947 948 void fld_s(int index) { Assembler::fld_s(index); } 949 void fld_s(Address src) { Assembler::fld_s(src); } 950 void fld_s(AddressLiteral src); 951 952 void fld_d(Address src) { Assembler::fld_d(src); } 953 void fld_d(AddressLiteral src); 954 955 void fld_x(Address src) { Assembler::fld_x(src); } 956 void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); } 957 958 void fmul_s(Address src) { Assembler::fmul_s(src); } 959 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 960 #endif // !_LP64 961 962 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 963 void ldmxcsr(AddressLiteral src, Register rscratch = noreg); 964 965 #ifdef _LP64 966 private: 967 void sha256_AVX2_one_round_compute( 968 Register reg_old_h, 969 Register reg_a, 970 Register reg_b, 971 Register reg_c, 972 Register reg_d, 973 Register reg_e, 974 Register reg_f, 975 Register reg_g, 976 Register reg_h, 977 int iter); 978 void sha256_AVX2_four_rounds_compute_first(int start); 979 void sha256_AVX2_four_rounds_compute_last(int start); 980 void sha256_AVX2_one_round_and_sched( 981 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */ 982 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */ 983 XMMRegister xmm_2, /* ymm6 */ 984 XMMRegister xmm_3, /* ymm7 */ 985 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */ 986 Register reg_b, /* ebx */ /* full cycle is 8 iterations */ 987 Register reg_c, /* edi */ 988 Register reg_d, /* esi */ 989 Register reg_e, /* r8d */ 990 Register reg_f, /* r9d */ 991 Register reg_g, /* r10d */ 992 Register reg_h, /* r11d */ 993 int iter); 994 995 void addm(int disp, Register r1, Register r2); 996 997 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d, 998 Register e, Register f, Register g, Register h, int iteration); 999 1000 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, 
  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register  dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address   dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister      src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register       src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address        src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register       src) { Assembler::kmovql(dst, src); }
  void kmovql(Register  dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address        src) { Assembler::kmovql(dst, src); }
  void kmovql(Address   dst, KRegister      src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
  void kmov(Address   dst, KRegister src);
  void kmov(KRegister dst, Address   src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register  dst, KRegister src);
  void kmov(KRegister dst, Register  src);

  using Assembler::movddup;
  void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);

  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
  void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }

  void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    // Elide the move when it can have no effect: dst == src and no mask applied.
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqub(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }

  void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquw(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdqul(dst, src, vector_len);
    }
  }
  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }

  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
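
  // Illustrative masked move (hypothetical registers): with merge semantics,
  // only the dword lanes selected by k1 are copied and the remaining lanes of
  // dst are left intact:
  //   evmovdqul(xmm0, k1, xmm1, /*merge=*/true, Assembler::AVX_512bit);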

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }
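
  // The pclmulqdq imm8 is a qword selector: bit 0 picks the low or high
  // qword of the first source and bit 4 that of the second, so 0x00 is
  // low*low, 0x11 is high*high, and 0x01/0x10 mix the two halves.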

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastd;
  void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastq;
  void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector compares
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);

  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
    }
  }
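
  // In the shift wrappers above, is_varshift selects between the uniform
  // forms (one shift count in src applied to every element) and the AVX-512
  // variable forms (evpsllvw and friends), where src supplies a per-element
  // shift count.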

  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
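
  // Note (an assumption about the referenced constants): vabsss/vabssd and
  // vnegatess/vnegatesd implement FP abs/negation bitwise, with the
  // AddressLiteral expected to point at a sign-bit mask -- XOR with
  // 0x80000000 flips a float's sign, AND with its complement clears it.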

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
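
  // The vxorpd fallback above is safe because XOR is purely bitwise: vpxor
  // and vxorpd compute identical results and differ only in the execution
  // domain (integer vs. floating-point) the CPU assigns them.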

  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
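
  // Dispatch rationale (a sketch): AVX-512 parts without the VL extension
  // (supports_avx512novl()) use the EVEX-encoded vinserti32x4/vextracti32x4
  // forms, while pre-AVX2 targets fall back to the float-domain
  // vinsertf128/vextractf128, which move the same 128 bits.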

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }

  // AVX-512 mask operations.
  void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
  void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
  void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void kortest(uint masklen, KRegister src1, KRegister src2);
  void ktest(uint masklen, KRegister src1, KRegister src2);

  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);

  using Assembler::evpandq;
  void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::evpaddq;
  void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::evporq;
  void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpshufb;
  void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpternlogq;
  void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);
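
  // vpternlogq's imm8 is a 3-input truth table: for every bit position the
  // result bit is imm8[(a << 2) | (b << 1) | c], where a, b, c are the
  // corresponding bits of dst, src2 and src3; e.g. imm8 = 0x96 computes
  // a ^ b ^ c.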

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src)  { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj, Register rscratch);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj, Register rscratch);

  void movptr(Register dst, Register src);
  void movptr(Register dst, Address src);
  void movptr(Register dst, AddressLiteral src);
  void movptr(Register dst, ArrayAddress src);
  void movptr(Register dst, intptr_t src);
  void movptr(Address dst, Register src);
  void movptr(Address dst, int32_t imm);
  void movptr(Address dst, intptr_t src, Register rscratch);
  void movptr(ArrayAddress dst, Register src, Register rscratch);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

  // to avoid hiding movl
  void mov32(Register dst, AddressLiteral src);
  void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::movq;
  void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Can push value or effective address
  void pushptr(AddressLiteral src, Register rscratch);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj, Register rscratch);
  void pushklass(Metadata* obj, Register rscratch);

  // sign-extend as needed: an 'l' (32-bit) value to a ptr-sized element
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

 public:
  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask = knoreg);

  // clear memory initialization sequence for constant size
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
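
  // Background on folding (an explanatory sketch): the CRC kernels keep the
  // running remainder in an XMM register; each fold step carry-less-multiplies
  // it (pclmulqdq) by a precomputed constant of the form x^N mod P(x) and
  // xors in the next data chunk, advancing the CRC a whole chunk at a time.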

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI
#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);

  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

  void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits assembly code that jumps around any code
 * generated between the creation of the instance and its automatic
 * destruction at the end of a scope block, depending on the value of the
 * flag passed to the constructor, which is checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
  ~SkipIfEqual();
};
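
// Illustrative use of SkipIfEqual (hypothetical flag and scratch register;
// a sketch only):
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false, rscratch1);
//     // ... code emitted here executes only when SomeBoolFlag != false ...
//   } // ~SkipIfEqual binds the skip-target label here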

#endif // CPU_X86_MACROASSEMBLER_X86_HPP