/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int     number_of_arguments // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
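  //
  // Illustrative sketch (not part of the interface below): the public call_VM variants
  // further down funnel into this routine, passing noreg where a default is wanted,
  // roughly along the lines of
  //
  //   call_VM_base(oop_result,
  //                noreg,                // java_thread:  use the default thread register
  //                noreg,                // last_java_sp: use rsp
  //                entry_point,
  //                number_of_arguments,
  //                check_exceptions);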
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ?
                                 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
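  //
  // Illustrative sketch (not from this file), assuming the usual '#define __ masm->'
  // shorthand and hypothetical runtime entry points: a VM call that returns an oop and
  // takes one register argument, followed by a call to a LEAF entry point:
  //
  //   __ call_VM(rax, CAST_FROM_FN_PTR(address, some_entry), c_rarg1);
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_entry));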
254 255 256 void call_VM(Register oop_result, 257 address entry_point, 258 bool check_exceptions = true); 259 void call_VM(Register oop_result, 260 address entry_point, 261 Register arg_1, 262 bool check_exceptions = true); 263 void call_VM(Register oop_result, 264 address entry_point, 265 Register arg_1, Register arg_2, 266 bool check_exceptions = true); 267 void call_VM(Register oop_result, 268 address entry_point, 269 Register arg_1, Register arg_2, Register arg_3, 270 bool check_exceptions = true); 271 272 // Overloadings with last_Java_sp 273 void call_VM(Register oop_result, 274 Register last_java_sp, 275 address entry_point, 276 int number_of_arguments = 0, 277 bool check_exceptions = true); 278 void call_VM(Register oop_result, 279 Register last_java_sp, 280 address entry_point, 281 Register arg_1, bool 282 check_exceptions = true); 283 void call_VM(Register oop_result, 284 Register last_java_sp, 285 address entry_point, 286 Register arg_1, Register arg_2, 287 bool check_exceptions = true); 288 void call_VM(Register oop_result, 289 Register last_java_sp, 290 address entry_point, 291 Register arg_1, Register arg_2, Register arg_3, 292 bool check_exceptions = true); 293 294 void get_vm_result (Register oop_result, Register thread); 295 void get_vm_result_2(Register metadata_result, Register thread); 296 297 // These always tightly bind to MacroAssembler::call_VM_base 298 // bypassing the virtual implementation 299 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); 300 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); 301 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); 302 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); 303 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); 304 305 void call_VM_leaf0(address entry_point); 306 void call_VM_leaf(address entry_point, 307 int number_of_arguments = 0); 308 void call_VM_leaf(address entry_point, 309 Register arg_1); 310 void call_VM_leaf(address entry_point, 311 Register arg_1, Register arg_2); 312 void call_VM_leaf(address entry_point, 313 Register arg_1, Register arg_2, Register arg_3); 314 315 void call_VM_leaf(address entry_point, 316 Register arg_1, Register arg_2, Register arg_3, Register arg_4); 317 318 // These always tightly bind to MacroAssembler::call_VM_leaf_base 319 // bypassing the virtual implementation 320 void super_call_VM_leaf(address entry_point); 321 void super_call_VM_leaf(address entry_point, Register arg_1); 322 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); 323 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); 324 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); 325 326 // last Java Frame (fills frame anchor) 327 void set_last_Java_frame(Register thread, 328 Register last_java_sp, 329 Register last_java_fp, 330 address last_java_pc, 331 Register rscratch); 332 333 // thread in the default location (r15_thread on 64bit) 334 void set_last_Java_frame(Register last_java_sp, 335 
Register last_java_fp, 336 address last_java_pc, 337 Register rscratch); 338 339 void reset_last_Java_frame(Register thread, bool clear_fp); 340 341 // thread in the default location (r15_thread on 64bit) 342 void reset_last_Java_frame(bool clear_fp); 343 344 // jobjects 345 void clear_jobject_tag(Register possibly_non_local); 346 void resolve_jobject(Register value, Register thread, Register tmp); 347 void resolve_global_jobject(Register value, Register thread, Register tmp); 348 349 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 350 void c2bool(Register x); 351 352 // C++ bool manipulation 353 354 void movbool(Register dst, Address src); 355 void movbool(Address dst, bool boolconst); 356 void movbool(Address dst, Register src); 357 void testbool(Register dst); 358 359 void resolve_oop_handle(Register result, Register tmp); 360 void resolve_weak_handle(Register result, Register tmp); 361 void load_mirror(Register mirror, Register method, Register tmp); 362 void load_method_holder_cld(Register rresult, Register rmethod); 363 364 void load_method_holder(Register holder, Register method); 365 366 // oop manipulations 367 void load_klass(Register dst, Register src, Register tmp); 368 void store_klass(Register dst, Register src, Register tmp); 369 370 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 371 Register tmp1, Register thread_tmp); 372 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 373 Register tmp1, Register tmp2, Register tmp3); 374 375 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, 376 Register thread_tmp = noreg, DecoratorSet decorators = 0); 377 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, 378 Register thread_tmp = noreg, DecoratorSet decorators = 0); 379 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg, 380 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0); 381 382 // Used for storing null. All other oop constants should be 383 // stored using routines that take a jobject. 
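  //
  // Illustrative sketch (not from this file), assuming the usual '#define __ masm->'
  // shorthand and a hypothetical field offset: loading an oop field with the default
  // decorators and then storing null back into it:
  //
  //   __ load_heap_oop(rax, Address(rbx, some_field_offset));
  //   __ store_heap_oop_null(Address(rbx, some_field_offset));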
384 void store_heap_oop_null(Address dst); 385 386 #ifdef _LP64 387 void store_klass_gap(Register dst, Register src); 388 389 // This dummy is to prevent a call to store_heap_oop from 390 // converting a zero (like null) into a Register by giving 391 // the compiler two choices it can't resolve 392 393 void store_heap_oop(Address dst, void* dummy); 394 395 void encode_heap_oop(Register r); 396 void decode_heap_oop(Register r); 397 void encode_heap_oop_not_null(Register r); 398 void decode_heap_oop_not_null(Register r); 399 void encode_heap_oop_not_null(Register dst, Register src); 400 void decode_heap_oop_not_null(Register dst, Register src); 401 402 void set_narrow_oop(Register dst, jobject obj); 403 void set_narrow_oop(Address dst, jobject obj); 404 void cmp_narrow_oop(Register dst, jobject obj); 405 void cmp_narrow_oop(Address dst, jobject obj); 406 407 void encode_klass_not_null(Register r, Register tmp); 408 void decode_klass_not_null(Register r, Register tmp); 409 void encode_and_move_klass_not_null(Register dst, Register src); 410 void decode_and_move_klass_not_null(Register dst, Register src); 411 void set_narrow_klass(Register dst, Klass* k); 412 void set_narrow_klass(Address dst, Klass* k); 413 void cmp_narrow_klass(Register dst, Klass* k); 414 void cmp_narrow_klass(Address dst, Klass* k); 415 416 // if heap base register is used - reinit it with the correct value 417 void reinit_heapbase(); 418 419 DEBUG_ONLY(void verify_heapbase(const char* msg);) 420 421 #endif // _LP64 422 423 // Int division/remainder for Java 424 // (as idivl, but checks for special case as described in JVM spec.) 425 // returns idivl instruction offset for implicit exception handling 426 int corrected_idivl(Register reg); 427 428 // Long division/remainder for Java 429 // (as idivq, but checks for special case as described in JVM spec.) 430 // returns idivq instruction offset for implicit exception handling 431 int corrected_idivq(Register reg); 432 433 void int3(); 434 435 // Long operation macros for a 32bit cpu 436 // Long negation for Java 437 void lneg(Register hi, Register lo); 438 439 // Long multiplication for Java 440 // (destroys contents of eax, ebx, ecx and edx) 441 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y 442 443 // Long shifts for Java 444 // (semantics as described in JVM spec.) 445 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) 446 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) 447 448 // Long compare for Java 449 // (semantics as described in JVM spec.) 450 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) 451 452 453 // misc 454 455 // Sign extension 456 void sign_extend_short(Register reg); 457 void sign_extend_byte(Register reg); 458 459 // Division by power of 2, rounding towards 0 460 void division_with_shift(Register reg, int shift_value); 461 462 #ifndef _LP64 463 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: 464 // 465 // CF (corresponds to C0) if x < y 466 // PF (corresponds to C2) if unordered 467 // ZF (corresponds to C3) if x = y 468 // 469 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 470 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) 471 void fcmp(Register tmp); 472 // Variant of the above which allows y to be further down the stack 473 // and which only pops x and y if specified. 
If pop_right is 474 // specified then pop_left must also be specified. 475 void fcmp(Register tmp, int index, bool pop_left, bool pop_right); 476 477 // Floating-point comparison for Java 478 // Compares the top-most stack entries on the FPU stack and stores the result in dst. 479 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 480 // (semantics as described in JVM spec.) 481 void fcmp2int(Register dst, bool unordered_is_less); 482 // Variant of the above which allows y to be further down the stack 483 // and which only pops x and y if specified. If pop_right is 484 // specified then pop_left must also be specified. 485 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); 486 487 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) 488 // tmp is a temporary register, if none is available use noreg 489 void fremr(Register tmp); 490 491 // only if +VerifyFPU 492 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); 493 #endif // !LP64 494 495 // dst = c = a * b + c 496 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 497 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 498 499 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 500 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 501 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 502 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 503 504 505 // same as fcmp2int, but using SSE2 506 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 507 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 508 509 // branch to L if FPU flag C2 is set/not set 510 // tmp is a temporary register, if none is available use noreg 511 void jC2 (Register tmp, Label& L); 512 void jnC2(Register tmp, Label& L); 513 514 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into 515 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 516 void load_float(Address src); 517 518 // Store float value to 'address'. If UseSSE >= 1, the value is stored 519 // from register xmm0. Otherwise, the value is stored from the FPU stack. 520 void store_float(Address dst); 521 522 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into 523 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 524 void load_double(Address src); 525 526 // Store double value to 'address'. If UseSSE >= 2, the value is stored 527 // from register xmm0. Otherwise, the value is stored from the FPU stack. 
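  //
  // Illustrative sketch (not from this file), assuming the usual '#define __ masm->'
  // shorthand: copying a float between two stack slots with these helpers; the value
  // travels through xmm0 when UseSSE >= 1 and through the x87 FPU stack otherwise,
  // as described above:
  //
  //   __ load_float(Address(rsp, 0));
  //   __ store_float(Address(rsp, wordSize));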
528 void store_double(Address dst); 529 530 #ifndef _LP64 531 // Pop ST (ffree & fincstp combined) 532 void fpop(); 533 534 void empty_FPU_stack(); 535 #endif // !_LP64 536 537 void push_IU_state(); 538 void pop_IU_state(); 539 540 void push_FPU_state(); 541 void pop_FPU_state(); 542 543 void push_CPU_state(); 544 void pop_CPU_state(); 545 546 void push_cont_fastpath(); 547 void pop_cont_fastpath(); 548 549 void inc_held_monitor_count(); 550 void dec_held_monitor_count(); 551 552 DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);) 553 554 // Round up to a power of two 555 void round_to(Register reg, int modulus); 556 557 private: 558 // General purpose and XMM registers potentially clobbered by native code; there 559 // is no need for FPU or AVX opmask related methods because C1/interpreter 560 // - we save/restore FPU state as a whole always 561 // - do not care about AVX-512 opmask 562 static RegSet call_clobbered_gp_registers(); 563 static XMMRegSet call_clobbered_xmm_registers(); 564 565 void push_set(XMMRegSet set, int offset); 566 void pop_set(XMMRegSet set, int offset); 567 568 public: 569 void push_set(RegSet set, int offset = -1); 570 void pop_set(RegSet set, int offset = -1); 571 572 // Push and pop everything that might be clobbered by a native 573 // runtime call. 574 // Only save the lower 64 bits of each vector register. 575 // Additional registers can be excluded in a passed RegSet. 576 void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true); 577 void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true); 578 579 void push_call_clobbered_registers(bool save_fpu = true) { 580 push_call_clobbered_registers_except(RegSet(), save_fpu); 581 } 582 void pop_call_clobbered_registers(bool restore_fpu = true) { 583 pop_call_clobbered_registers_except(RegSet(), restore_fpu); 584 } 585 586 // allocation 587 void tlab_allocate( 588 Register thread, // Current thread 589 Register obj, // result: pointer to object after successful allocation 590 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 591 int con_size_in_bytes, // object size in bytes if known at compile time 592 Register t1, // temp register 593 Register t2, // temp register 594 Label& slow_case // continuation point if fast allocation fails 595 ); 596 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp); 597 598 void population_count(Register dst, Register src, Register scratch1, Register scratch2); 599 600 // interface method calling 601 void lookup_interface_method(Register recv_klass, 602 Register intf_klass, 603 RegisterOrConstant itable_index, 604 Register method_result, 605 Register scan_temp, 606 Label& no_such_interface, 607 bool return_method = true); 608 609 void lookup_interface_method_stub(Register recv_klass, 610 Register holder_klass, 611 Register resolved_klass, 612 Register method_result, 613 Register scan_temp, 614 Register temp_reg2, 615 Register receiver, 616 int itable_index, 617 Label& L_no_such_interface); 618 619 // virtual method calling 620 void lookup_virtual_method(Register recv_klass, 621 RegisterOrConstant vtable_index, 622 Register method_result); 623 624 // Test sub_klass against super_klass, with fast and slow paths. 625 626 // The fast path produces a tri-state answer: yes / no / maybe-slow. 627 // One of the three labels can be null, meaning take the fall-through. 628 // If super_check_offset is -1, the value is loaded up from super_klass. 
629 // No registers are killed, except temp_reg. 630 void check_klass_subtype_fast_path(Register sub_klass, 631 Register super_klass, 632 Register temp_reg, 633 Label* L_success, 634 Label* L_failure, 635 Label* L_slow_path, 636 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); 637 638 // The rest of the type check; must be wired to a corresponding fast path. 639 // It does not repeat the fast path logic, so don't use it standalone. 640 // The temp_reg and temp2_reg can be noreg, if no temps are available. 641 // Updates the sub's secondary super cache as necessary. 642 // If set_cond_codes, condition codes will be Z on success, NZ on failure. 643 void check_klass_subtype_slow_path(Register sub_klass, 644 Register super_klass, 645 Register temp_reg, 646 Register temp2_reg, 647 Label* L_success, 648 Label* L_failure, 649 bool set_cond_codes = false); 650 void hashed_check_klass_subtype_slow_path(Register sub_klass, 651 Register super_klass, 652 Register temp_reg, 653 Register temp2_reg, 654 Label* L_success, 655 Label* L_failure, 656 bool set_cond_codes = false); 657 658 // As above, but with a constant super_klass. 659 // The result is in Register result, not the condition codes. 660 void lookup_secondary_supers_table(Register sub_klass, 661 Register super_klass, 662 Register temp1, 663 Register temp2, 664 Register temp3, 665 Register temp4, 666 Register result, 667 u1 super_klass_slot); 668 669 void lookup_secondary_supers_table_slow_path(Register r_super_klass, 670 Register r_array_base, 671 Register r_array_index, 672 Register r_bitmap, 673 Register temp1, 674 Register temp2, 675 Label* L_success, 676 Label* L_failure = nullptr); 677 678 void verify_secondary_supers_table(Register r_sub_klass, 679 Register r_super_klass, 680 Register expected, 681 Register temp1, 682 Register temp2, 683 Register temp3); 684 685 void repne_scanq(Register addr, Register value, Register count, Register limit, 686 Label* L_success, 687 Label* L_failure = nullptr); 688 689 // Simplified, combined version, good for typical uses. 690 // Falls through on failure. 691 void check_klass_subtype(Register sub_klass, 692 Register super_klass, 693 Register temp_reg, 694 Label& L_success); 695 696 void clinit_barrier(Register klass, 697 Register thread, 698 Label* L_fast_path = nullptr, 699 Label* L_slow_path = nullptr); 700 701 // method handles (JSR 292) 702 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); 703 704 // Debugging 705 706 // only if +VerifyOops 707 void _verify_oop(Register reg, const char* s, const char* file, int line); 708 void _verify_oop_addr(Address addr, const char* s, const char* file, int line); 709 710 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) { 711 if (VerifyOops) { 712 _verify_oop(reg, s, file, line); 713 } 714 } 715 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) { 716 if (VerifyOops) { 717 _verify_oop_addr(reg, s, file, line); 718 } 719 } 720 721 // TODO: verify method and klass metadata (compare against vptr?) 
722 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} 723 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} 724 725 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__) 726 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__) 727 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__) 728 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) 729 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) 730 731 // Verify or restore cpu control state after JNI call 732 void restore_cpu_control_state_after_jni(Register rscratch); 733 734 // prints msg, dumps registers and stops execution 735 void stop(const char* msg); 736 737 // prints msg and continues 738 void warn(const char* msg); 739 740 // dumps registers and other state 741 void print_state(); 742 743 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); 744 static void debug64(char* msg, int64_t pc, int64_t regs[]); 745 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); 746 static void print_state64(int64_t pc, int64_t regs[]); 747 748 void os_breakpoint(); 749 750 void untested() { stop("untested"); } 751 752 void unimplemented(const char* what = ""); 753 754 void should_not_reach_here() { stop("should not reach here"); } 755 756 void print_CPU_state(); 757 758 // Stack overflow checking 759 void bang_stack_with_offset(int offset) { 760 // stack grows down, caller passes positive offset 761 assert(offset > 0, "must bang with negative offset"); 762 movl(Address(rsp, (-offset)), rax); 763 } 764 765 // Writes to stack successive pages until offset reached to check for 766 // stack overflow + shadow pages. Also, clobbers tmp 767 void bang_stack_size(Register size, Register tmp); 768 769 // Check for reserved stack access in method being exited (for JIT) 770 void reserved_stack_check(); 771 772 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod); 773 774 void verify_tlab(); 775 776 static Condition negate_condition(Condition cond); 777 778 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit 779 // operands. In general the names are modified to avoid hiding the instruction in Assembler 780 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers 781 // here in MacroAssembler. 
  // The major exception to this rule is call

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst,
int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 863 864 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 865 void subptr(Register dst, int32_t src); 866 // Force generation of a 4 byte immediate value even if it fits into 8bit 867 void subptr_imm32(Register dst, int32_t src); 868 void subptr(Register dst, Register src); 869 void subptr(Register dst, RegisterOrConstant src) { 870 if (src.is_constant()) subptr(dst, (int) src.as_constant()); 871 else subptr(dst, src.as_register()); 872 } 873 874 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 875 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 876 877 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 878 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 879 880 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } 881 882 883 884 // Helper functions for statistics gathering. 885 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. 886 void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg); 887 // Unconditional atomic increment. 888 void atomic_incl(Address counter_addr); 889 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg); 890 #ifdef _LP64 891 void atomic_incq(Address counter_addr); 892 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg); 893 #endif 894 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; } 895 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; } 896 897 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } 898 void lea(Register dst, AddressLiteral adr); 899 void lea(Address dst, AddressLiteral adr, Register rscratch); 900 901 void leal32(Register dst, Address src) { leal(dst, src); } 902 903 // Import other testl() methods from the parent class or else 904 // they will be hidden by the following overriding declaration. 
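  //
  // Background sketch (plain C++, not specific to this file): an overload declared in a
  // derived class hides all base-class overloads of the same name unless they are
  // re-imported with a using-declaration, e.g.
  //
  //   struct Base { void f(int); void f(long); };
  //   struct Derived : Base {
  //     using Base::f;   // without this, f(long) would be hidden by the overload below
  //     void f(int);
  //   };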
905 using Assembler::testl; 906 void testl(Address dst, int32_t imm32); 907 void testl(Register dst, int32_t imm32); 908 void testl(Register dst, AddressLiteral src); // requires reachable address 909 using Assembler::testq; 910 void testq(Address dst, int32_t imm32); 911 void testq(Register dst, int32_t imm32); 912 913 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 914 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 915 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 916 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } 917 918 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 919 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); } 920 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 921 void testptr(Register src1, Register src2); 922 923 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 924 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 925 926 // Calls 927 928 void call(Label& L, relocInfo::relocType rtype); 929 void call(Register entry); 930 void call(Address addr) { Assembler::call(addr); } 931 932 // NOTE: this call transfers to the effective address of entry NOT 933 // the address contained by entry. This is because this is more natural 934 // for jumps/calls. 935 void call(AddressLiteral entry, Register rscratch = rax); 936 937 // Emit the CompiledIC call idiom 938 void ic_call(address entry, jint method_index = 0); 939 static int ic_check_size(); 940 int ic_check(int end_alignment); 941 942 void emit_static_call_stub(); 943 944 // Jumps 945 946 // NOTE: these jumps transfer to the effective address of dst NOT 947 // the address contained by dst. This is because this is more natural 948 // for jumps/calls. 949 void jump(AddressLiteral dst, Register rscratch = noreg); 950 951 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg); 952 953 // 32bit can do a case table jump in one instruction but we no longer allow the base 954 // to be installed in the Address class. 
This jump will transfer to the address 955 // contained in the location described by entry (not the address of entry) 956 void jump(ArrayAddress entry, Register rscratch); 957 958 // Floating 959 960 void push_f(XMMRegister r); 961 void pop_f(XMMRegister r); 962 void push_d(XMMRegister r); 963 void pop_d(XMMRegister r); 964 965 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } 966 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } 967 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 968 969 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } 970 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } 971 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 972 973 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } 974 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } 975 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 976 977 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } 978 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 979 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 980 981 #ifndef _LP64 982 void fadd_s(Address src) { Assembler::fadd_s(src); } 983 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 984 985 void fldcw(Address src) { Assembler::fldcw(src); } 986 void fldcw(AddressLiteral src); 987 988 void fld_s(int index) { Assembler::fld_s(index); } 989 void fld_s(Address src) { Assembler::fld_s(src); } 990 void fld_s(AddressLiteral src); 991 992 void fld_d(Address src) { Assembler::fld_d(src); } 993 void fld_d(AddressLiteral src); 994 995 void fld_x(Address src) { Assembler::fld_x(src); } 996 void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); } 997 998 void fmul_s(Address src) { Assembler::fmul_s(src); } 999 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 1000 #endif // !_LP64 1001 1002 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 1003 void ldmxcsr(AddressLiteral src, Register rscratch = noreg); 1004 1005 #ifdef _LP64 1006 private: 1007 void sha256_AVX2_one_round_compute( 1008 Register reg_old_h, 1009 Register reg_a, 1010 Register reg_b, 1011 Register reg_c, 1012 Register reg_d, 1013 Register reg_e, 1014 Register reg_f, 1015 Register reg_g, 1016 Register reg_h, 1017 int iter); 1018 void sha256_AVX2_four_rounds_compute_first(int start); 1019 void sha256_AVX2_four_rounds_compute_last(int start); 1020 void sha256_AVX2_one_round_and_sched( 1021 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */ 1022 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */ 1023 XMMRegister xmm_2, /* ymm6 */ 1024 XMMRegister xmm_3, /* ymm7 */ 1025 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */ 1026 Register reg_b, /* ebx */ /* full cycle is 8 iterations */ 1027 Register reg_c, /* edi */ 1028 Register reg_d, /* esi */ 1029 Register reg_e, /* r8d */ 1030 Register reg_f, /* r9d */ 1031 Register reg_g, /* r10d */ 1032 Register reg_h, /* r11d */ 1033 int iter); 1034 1035 void addm(int disp, Register r1, Register r2); 1036 1037 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d, 1038 Register e, Register f, Register g, Register h, int iteration); 1039 1040 void 
sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1041 Register a, Register b, Register c, Register d, Register e, Register f, 1042 Register g, Register h, int iteration); 1043 1044 void addmq(int disp, Register r1, Register r2); 1045 public: 1046 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1047 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1048 Register buf, Register state, Register ofs, Register limit, Register rsp, 1049 bool multi_block, XMMRegister shuf_mask); 1050 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1051 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1052 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block, 1053 XMMRegister shuf_mask); 1054 #endif // _LP64 1055 1056 void fast_md5(Register buf, Address state, Address ofs, Address limit, 1057 bool multi_block); 1058 1059 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0, 1060 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask, 1061 Register buf, Register state, Register ofs, Register limit, Register rsp, 1062 bool multi_block); 1063 1064 #ifdef _LP64 1065 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1066 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1067 Register buf, Register state, Register ofs, Register limit, Register rsp, 1068 bool multi_block, XMMRegister shuf_mask); 1069 #else 1070 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1071 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1072 Register buf, Register state, Register ofs, Register limit, Register rsp, 1073 bool multi_block); 1074 #endif 1075 1076 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1077 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1078 Register rax, Register rcx, Register rdx, Register tmp); 1079 1080 #ifndef _LP64 1081 private: 1082 // Initialized in macroAssembler_x86_constants.cpp 1083 static address ONES; 1084 static address L_2IL0FLOATPACKET_0; 1085 static address PI4_INV; 1086 static address PI4X3; 1087 static address PI4X4; 1088 1089 public: 1090 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1091 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1092 Register rax, Register rcx, Register rdx, Register tmp1); 1093 1094 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1095 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1096 Register rax, Register rcx, Register rdx, Register tmp); 1097 1098 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, 1099 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, 1100 Register rdx, Register tmp); 1101 1102 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1103 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1104 Register rax, Register rbx, Register rdx); 1105 1106 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1107 XMMRegister xmm4, XMMRegister xmm5, 
XMMRegister xmm6, XMMRegister xmm7, 1108 Register rax, Register rcx, Register rdx, Register tmp); 1109 1110 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1111 Register edx, Register ebx, Register esi, Register edi, 1112 Register ebp, Register esp); 1113 1114 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx, 1115 Register esi, Register edi, Register ebp, Register esp); 1116 1117 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1118 Register edx, Register ebx, Register esi, Register edi, 1119 Register ebp, Register esp); 1120 1121 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1122 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1123 Register rax, Register rcx, Register rdx, Register tmp); 1124 #endif // !_LP64 1125 1126 private: 1127 1128 // these are private because users should be doing movflt/movdbl 1129 1130 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } 1131 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } 1132 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 1133 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1134 1135 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 1136 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1137 1138 public: 1139 1140 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 1141 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 1142 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1143 1144 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 1145 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 1146 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1147 1148 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } 1149 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } 1150 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1151 1152 using Assembler::vbroadcastsd; 1153 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1154 1155 using Assembler::vbroadcastss; 1156 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1157 1158 // Vector float blend 1159 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1160 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1161 1162 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 1163 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 1164 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1165 1166 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 1167 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 1168 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1169 1170 // Move Unaligned Double Quadword 1171 void movdqu(Address dst, XMMRegister src); 1172 void movdqu(XMMRegister dst, XMMRegister src); 1173 void movdqu(XMMRegister dst, Address 
src); 1174 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1175 1176 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } 1177 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } 1178 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } 1179 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } 1180 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } 1181 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1182 1183 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } 1184 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } 1185 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } 1186 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } 1187 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } 1188 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1189 1190 // Safe move operation, lowers down to 16bit moves for targets supporting 1191 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. 1192 void kmov(Address dst, KRegister src); 1193 void kmov(KRegister dst, Address src); 1194 void kmov(KRegister dst, KRegister src); 1195 void kmov(Register dst, KRegister src); 1196 void kmov(KRegister dst, Register src); 1197 1198 using Assembler::movddup; 1199 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1200 1201 using Assembler::vmovddup; 1202 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1203 1204 // AVX Unaligned forms 1205 void vmovdqu(Address dst, XMMRegister src); 1206 void vmovdqu(XMMRegister dst, Address src); 1207 void vmovdqu(XMMRegister dst, XMMRegister src); 1208 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1209 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1210 1211 // AVX512 Unaligned 1212 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); 1213 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len); 1214 1215 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1216 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1217 1218 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1219 if (dst->encoding() != src->encoding() || mask != k0) { 1220 Assembler::evmovdqub(dst, mask, src, merge, vector_len); 1221 } 1222 } 1223 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1224 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1225 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1226 1227 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1228 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1229 1230 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int 
vector_len) { 1231 if (dst->encoding() != src->encoding() || mask != k0) { 1232 Assembler::evmovdquw(dst, mask, src, merge, vector_len); 1233 } 1234 } 1235 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1236 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1237 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1238 1239 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1240 if (dst->encoding() != src->encoding()) { 1241 Assembler::evmovdqul(dst, src, vector_len); 1242 } 1243 } 1244 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1245 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1246 1247 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1248 if (dst->encoding() != src->encoding() || mask != k0) { 1249 Assembler::evmovdqul(dst, mask, src, merge, vector_len); 1250 } 1251 } 1252 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1253 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1254 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1255 1256 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 1257 if (dst->encoding() != src->encoding()) { 1258 Assembler::evmovdquq(dst, src, vector_len); 1259 } 1260 } 1261 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1262 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1263 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1264 1265 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1266 if (dst->encoding() != src->encoding() || mask != k0) { 1267 Assembler::evmovdquq(dst, mask, src, merge, vector_len); 1268 } 1269 } 1270 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1271 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1272 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1273 1274 // Move Aligned Double Quadword 1275 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1276 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1277 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1278 1279 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1280 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1281 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1282 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1283 1284 void mulpd(XMMRegister dst, XMMRegister src) { 
Assembler::mulpd(dst, src); } 1285 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1286 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1287 1288 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1289 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1290 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1291 1292 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 1293 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1294 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1295 1296 // Carry-Less Multiplication Quadword 1297 void pclmulldq(XMMRegister dst, XMMRegister src) { 1298 // 0x00 - multiply lower 64 bits [0:63] 1299 Assembler::pclmulqdq(dst, src, 0x00); 1300 } 1301 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1302 // 0x11 - multiply upper 64 bits [64:127] 1303 Assembler::pclmulqdq(dst, src, 0x11); 1304 } 1305 1306 void pcmpeqb(XMMRegister dst, XMMRegister src); 1307 void pcmpeqw(XMMRegister dst, XMMRegister src); 1308 1309 void pcmpestri(XMMRegister dst, Address src, int imm8); 1310 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1311 1312 void pmovzxbw(XMMRegister dst, XMMRegister src); 1313 void pmovzxbw(XMMRegister dst, Address src); 1314 1315 void pmovmskb(Register dst, XMMRegister src); 1316 1317 void ptest(XMMRegister dst, XMMRegister src); 1318 1319 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1320 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1321 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1322 1323 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1324 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1325 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1326 1327 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1328 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1329 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1330 1331 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1332 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1333 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1334 1335 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1336 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1337 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1338 1339 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1340 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1341 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1342 1343 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1344 void xorpd(XMMRegister dst, XMMRegister src); 1345 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1346 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1347 1348 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1349 void xorps(XMMRegister dst, XMMRegister src); 1350 void xorps(XMMRegister dst, Address src) { 
Assembler::xorps(dst, src); } 1351 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1352 1353 // Shuffle Bytes 1354 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1355 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1356 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1357 // AVX 3-operands instructions 1358 1359 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1360 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1361 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1362 1363 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1364 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1365 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1366 1367 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1368 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1369 1370 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1371 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1372 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1373 1374 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1375 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1376 1377 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1378 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1379 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1380 1381 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1382 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1383 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1384 1385 using Assembler::vpbroadcastd; 1386 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1387 1388 using Assembler::vpbroadcastq; 1389 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1390 1391 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1392 1393 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1394 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1395 1396 // Vector compares 1397 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1398 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1399 } 1400 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register 
rscratch = noreg); 1401 1402 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1403 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1404 } 1405 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1406 1407 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1408 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1409 } 1410 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1411 1412 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1413 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); 1414 } 1415 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1416 1417 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1418 1419 // Emit comparison instruction for the specified comparison predicate. 1420 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1421 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1422 1423 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1424 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1425 1426 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1427 1428 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1429 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1430 1431 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1432 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1433 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1434 1435 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1436 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1437 1438 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1439 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1440 1441 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1442 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1443 1444 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1445 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1446 1447 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1448 if (!is_varshift) { 1449 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1450 } else { 1451 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1452 } 1453 } 1454 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, 
XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1455 if (!is_varshift) { 1456 Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1457 } else { 1458 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1459 } 1460 } 1461 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1462 if (!is_varshift) { 1463 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1464 } else { 1465 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1466 } 1467 } 1468 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1469 if (!is_varshift) { 1470 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1471 } else { 1472 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1473 } 1474 } 1475 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1476 if (!is_varshift) { 1477 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1478 } else { 1479 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1480 } 1481 } 1482 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1483 if (!is_varshift) { 1484 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1485 } else { 1486 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1487 } 1488 } 1489 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1490 if (!is_varshift) { 1491 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1492 } else { 1493 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1494 } 1495 } 1496 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1497 if (!is_varshift) { 1498 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1499 } else { 1500 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1501 } 1502 } 1503 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1504 if (!is_varshift) { 1505 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1506 } else { 1507 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1508 } 1509 } 1510 1511 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1512 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1513 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1514 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1515 1516 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1517 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1518 1519 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1520 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1521 1522 void vptest(XMMRegister dst, XMMRegister src); 1523 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1524 1525 void punpcklbw(XMMRegister dst, XMMRegister 
src); 1526 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1527 1528 void pshufd(XMMRegister dst, Address src, int mode); 1529 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1530 1531 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1532 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1533 1534 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1535 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1536 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1537 1538 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1539 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1540 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1541 1542 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1543 1544 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1545 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1546 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1547 1548 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1549 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1550 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1551 1552 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1553 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1554 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1555 1556 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1557 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1558 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1559 1560 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1561 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1562 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1563 1564 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1565 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1566 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1567 1568 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1569 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1570 1571 // AVX Vector instructions 1572 1573 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 
Assembler::vxorpd(dst, nds, src, vector_len); } 1574 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1575 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1576 1577 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1578 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1579 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1580 1581 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1582 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1583 Assembler::vpxor(dst, nds, src, vector_len); 1584 else 1585 Assembler::vxorpd(dst, nds, src, vector_len); 1586 } 1587 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1588 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1589 Assembler::vpxor(dst, nds, src, vector_len); 1590 else 1591 Assembler::vxorpd(dst, nds, src, vector_len); 1592 } 1593 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1594 1595 // Simple version for AVX2 256bit vectors 1596 void vpxor(XMMRegister dst, XMMRegister src) { 1597 assert(UseAVX >= 2, "Should be at least AVX2"); 1598 Assembler::vpxor(dst, dst, src, AVX_256bit); 1599 } 1600 void vpxor(XMMRegister dst, Address src) { 1601 assert(UseAVX >= 2, "Should be at least AVX2"); 1602 Assembler::vpxor(dst, dst, src, AVX_256bit); 1603 } 1604 1605 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1606 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1607 1608 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1609 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1610 Assembler::vinserti32x4(dst, nds, src, imm8); 1611 } else if (UseAVX > 1) { 1612 // vinserti128 is available only in AVX2 1613 Assembler::vinserti128(dst, nds, src, imm8); 1614 } else { 1615 Assembler::vinsertf128(dst, nds, src, imm8); 1616 } 1617 } 1618 1619 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1620 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1621 Assembler::vinserti32x4(dst, nds, src, imm8); 1622 } else if (UseAVX > 1) { 1623 // vinserti128 is available only in AVX2 1624 Assembler::vinserti128(dst, nds, src, imm8); 1625 } else { 1626 Assembler::vinsertf128(dst, nds, src, imm8); 1627 } 1628 } 1629 1630 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1631 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1632 Assembler::vextracti32x4(dst, src, imm8); 1633 } else if (UseAVX > 1) { 1634 // vextracti128 is available only in AVX2 1635 Assembler::vextracti128(dst, src, imm8); 1636 } else { 1637 Assembler::vextractf128(dst, src, imm8); 1638 } 1639 } 1640 1641 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1642 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1643 Assembler::vextracti32x4(dst, src, imm8); 1644 } else if (UseAVX > 1) { 1645 // vextracti128 is available only in AVX2 1646 Assembler::vextracti128(dst, src, imm8); 1647 } else { 1648 Assembler::vextractf128(dst, src, imm8); 
1649 } 1650 } 1651 1652 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1653 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1654 vinserti128(dst, dst, src, 1); 1655 } 1656 void vinserti128_high(XMMRegister dst, Address src) { 1657 vinserti128(dst, dst, src, 1); 1658 } 1659 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1660 vextracti128(dst, src, 1); 1661 } 1662 void vextracti128_high(Address dst, XMMRegister src) { 1663 vextracti128(dst, src, 1); 1664 } 1665 1666 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1667 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1668 Assembler::vinsertf32x4(dst, dst, src, 1); 1669 } else { 1670 Assembler::vinsertf128(dst, dst, src, 1); 1671 } 1672 } 1673 1674 void vinsertf128_high(XMMRegister dst, Address src) { 1675 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1676 Assembler::vinsertf32x4(dst, dst, src, 1); 1677 } else { 1678 Assembler::vinsertf128(dst, dst, src, 1); 1679 } 1680 } 1681 1682 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1683 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1684 Assembler::vextractf32x4(dst, src, 1); 1685 } else { 1686 Assembler::vextractf128(dst, src, 1); 1687 } 1688 } 1689 1690 void vextractf128_high(Address dst, XMMRegister src) { 1691 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1692 Assembler::vextractf32x4(dst, src, 1); 1693 } else { 1694 Assembler::vextractf128(dst, src, 1); 1695 } 1696 } 1697 1698 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1699 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1700 Assembler::vinserti64x4(dst, dst, src, 1); 1701 } 1702 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1703 Assembler::vinsertf64x4(dst, dst, src, 1); 1704 } 1705 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1706 Assembler::vextracti64x4(dst, src, 1); 1707 } 1708 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1709 Assembler::vextractf64x4(dst, src, 1); 1710 } 1711 void vextractf64x4_high(Address dst, XMMRegister src) { 1712 Assembler::vextractf64x4(dst, src, 1); 1713 } 1714 void vinsertf64x4_high(XMMRegister dst, Address src) { 1715 Assembler::vinsertf64x4(dst, dst, src, 1); 1716 } 1717 1718 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1719 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1720 vinserti128(dst, dst, src, 0); 1721 } 1722 void vinserti128_low(XMMRegister dst, Address src) { 1723 vinserti128(dst, dst, src, 0); 1724 } 1725 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1726 vextracti128(dst, src, 0); 1727 } 1728 void vextracti128_low(Address dst, XMMRegister src) { 1729 vextracti128(dst, src, 0); 1730 } 1731 1732 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1733 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1734 Assembler::vinsertf32x4(dst, dst, src, 0); 1735 } else { 1736 Assembler::vinsertf128(dst, dst, src, 0); 1737 } 1738 } 1739 1740 void vinsertf128_low(XMMRegister dst, Address src) { 1741 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1742 Assembler::vinsertf32x4(dst, dst, src, 0); 1743 } else { 1744 Assembler::vinsertf128(dst, dst, src, 0); 1745 } 1746 } 1747 1748 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1749 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1750 Assembler::vextractf32x4(dst, src, 0); 1751 } else { 1752 Assembler::vextractf128(dst, src, 0); 1753 } 1754 } 1755 1756 void vextractf128_low(Address dst, XMMRegister src) { 
1757 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1758 Assembler::vextractf32x4(dst, src, 0); 1759 } else { 1760 Assembler::vextractf128(dst, src, 0); 1761 } 1762 } 1763 1764 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1765 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1766 Assembler::vinserti64x4(dst, dst, src, 0); 1767 } 1768 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1769 Assembler::vinsertf64x4(dst, dst, src, 0); 1770 } 1771 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1772 Assembler::vextracti64x4(dst, src, 0); 1773 } 1774 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1775 Assembler::vextractf64x4(dst, src, 0); 1776 } 1777 void vextractf64x4_low(Address dst, XMMRegister src) { 1778 Assembler::vextractf64x4(dst, src, 0); 1779 } 1780 void vinsertf64x4_low(XMMRegister dst, Address src) { 1781 Assembler::vinsertf64x4(dst, dst, src, 0); 1782 } 1783 1784 // Carry-Less Multiplication Quadword 1785 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1786 // 0x00 - multiply lower 64 bits [0:63] 1787 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1788 } 1789 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1790 // 0x11 - multiply upper 64 bits [64:127] 1791 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1792 } 1793 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1794 // 0x10 - multiply nds[0:63] and src[64:127] 1795 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1796 } 1797 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1798 //0x01 - multiply nds[64:127] and src[0:63] 1799 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1800 } 1801 1802 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1803 // 0x00 - multiply lower 64 bits [0:63] 1804 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1805 } 1806 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1807 // 0x11 - multiply upper 64 bits [64:127] 1808 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1809 } 1810 1811 // AVX-512 mask operations. 
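// --- Editorial example (not part of the original header) ---------------------------
// The opmask helpers declared below are intended to be driven from a stub or intrinsic
// generator that holds a MacroAssembler, with the usual '#define __ masm->' shorthand.
// A minimal sketch; the mask registers, the 16-lane width and the label are
// hypothetical choices:
//
//   __ kand(T_INT, k3, k1, k2);            // k3 = k1 & k2; the element type selects the kand width
//   __ knot(16, k2, k1);                   // k2 = ~k1 over a 16-lane mask
//   __ kortest(16, k3, k3);                // ZF is set iff k3 has no lane set
//   __ jcc(Assembler::zero, L_no_lanes);   // L_no_lanes is a hypothetical Label
// ------------------------------------------------------------------------------------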
1812 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 1813 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1814 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 1815 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1816 void kortest(uint masklen, KRegister src1, KRegister src2); 1817 void ktest(uint masklen, KRegister src1, KRegister src2); 1818 1819 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1820 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1821 1822 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1823 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1824 1825 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1826 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1827 1828 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1829 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1830 1831 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1832 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1833 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1834 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1835 1836 using Assembler::evpandq; 1837 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1838 1839 using Assembler::evpaddq; 1840 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1841 1842 using Assembler::evporq; 1843 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1844 1845 using Assembler::vpshufb; 1846 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1847 1848 using Assembler::vpor; 1849 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1850 1851 using Assembler::vpternlogq; 1852 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 1853 1854 void cmov32( Condition cc, Register dst, Address src); 1855 void cmov32( Condition cc, Register dst, Register src); 1856 1857 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1858 1859 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1860 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1861 1862 void movoop(Register dst, jobject obj); 1863 void movoop(Address dst, jobject obj, Register rscratch); 1864 1865 
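// Editorial note (not part of the original header): many overloads in this class take
// an extra scratch register ('rscratch'). For AddressLiteral sources it is typically
// only consumed when the literal is not RIP-reachable on 64-bit targets and has to be
// materialized in a register first. A hedged sketch, assuming '#define __ masm->', a
// hypothetical 'address mask_table' constant area, and r10 free as scratch (an AVX2
// target is assumed for the 256-bit form):
//
//   __ movdqu(xmm1, ExternalAddress(mask_table), r10);                             // 128-bit load of a constant
//   __ vpand(xmm0, xmm0, ExternalAddress(mask_table), Assembler::AVX_256bit, r10); // masked AND against the constant
//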
void mov_metadata(Register dst, Metadata* obj); 1866 void mov_metadata(Address dst, Metadata* obj, Register rscratch); 1867 1868 void movptr(Register dst, Register src); 1869 void movptr(Register dst, Address src); 1870 void movptr(Register dst, AddressLiteral src); 1871 void movptr(Register dst, ArrayAddress src); 1872 void movptr(Register dst, intptr_t src); 1873 void movptr(Address dst, Register src); 1874 void movptr(Address dst, int32_t imm); 1875 void movptr(Address dst, intptr_t src, Register rscratch); 1876 void movptr(ArrayAddress dst, Register src, Register rscratch); 1877 1878 void movptr(Register dst, RegisterOrConstant src) { 1879 if (src.is_constant()) movptr(dst, src.as_constant()); 1880 else movptr(dst, src.as_register()); 1881 } 1882 1883 1884 // to avoid hiding movl 1885 void mov32(Register dst, AddressLiteral src); 1886 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 1887 1888 // Import other mov() methods from the parent class or else 1889 // they will be hidden by the following overriding declaration. 1890 using Assembler::movdl; 1891 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1892 1893 using Assembler::movq; 1894 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1895 1896 // Can push value or effective address 1897 void pushptr(AddressLiteral src, Register rscratch); 1898 1899 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 1900 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 1901 1902 void pushoop(jobject obj, Register rscratch); 1903 void pushklass(Metadata* obj, Register rscratch); 1904 1905 // sign-extend a 32-bit ('l') value to a pointer-sized element as needed 1906 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 1907 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 1908 1909 1910 public: 1911 // clear memory of size 'cnt' qwords, starting at 'base'; 1912 // if 'is_large' is set, do not try to produce a short loop 1913 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg); 1914 1915 // clear memory of a constant size of 'cnt' qwords, starting at 'base' 1916 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1917 1918 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 1919 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1920 1921 // Fill primitive arrays 1922 void generate_fill(BasicType t, bool aligned, 1923 Register to, Register value, Register count, 1924 Register rtmp, XMMRegister xtmp); 1925 1926 void encode_iso_array(Register src, Register dst, Register len, 1927 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1928 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 1929 1930 #ifdef _LP64 1931 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 1932 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1933 Register y, Register y_idx, Register z, 1934 Register carry, Register product, 1935 Register idx, Register kdx); 1936 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 1937 Register yz_idx, Register idx, 1938 Register carry, Register product, int offset); 1939 void multiply_128_x_128_bmi2_loop(Register y, Register z, 1940 Register carry, Register carry2, 1941
Register idx, Register jdx, 1942 Register yz_idx1, Register yz_idx2, 1943 Register tmp, Register tmp3, Register tmp4); 1944 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 1945 Register yz_idx, Register idx, Register jdx, 1946 Register carry, Register product, 1947 Register carry2); 1948 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, 1949 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 1950 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 1951 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1952 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 1953 Register tmp2); 1954 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 1955 Register rdxReg, Register raxReg); 1956 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 1957 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1958 Register tmp3, Register tmp4); 1959 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1960 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1961 1962 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 1963 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1964 Register raxReg); 1965 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 1966 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1967 Register raxReg); 1968 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 1969 Register result, Register tmp1, Register tmp2, 1970 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 1971 #endif 1972 1973 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
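// Editorial sketch (not part of the original header) of how the CRC32 helpers below
// are typically driven from the intrinsic stub; the register bindings (crc, buf, len,
// table, tmp) are hypothetical and '#define __ masm->' is assumed. java.util.zip.CRC32
// keeps its running state bit-inverted, hence the inversions around the kernel:
//
//   __ notl(crc);                                 // undo the Java-side inversion
//   __ kernel_crc32(crc, buf, len, table, tmp);   // bulk update over [buf, buf+len)
//   __ notl(crc);                                 // re-invert before returning
//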
1974 void update_byte_crc32(Register crc, Register val, Register table); 1975 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 1976 1977 1978 #ifdef _LP64 1979 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 1980 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 1981 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 1982 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 1983 #endif // _LP64 1984 1985 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 1986 // Note on a naming convention: 1987 // Prefix w = register only used on a Westmere+ architecture 1988 // Prefix n = register only used on a Nehalem architecture 1989 #ifdef _LP64 1990 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1991 Register tmp1, Register tmp2, Register tmp3); 1992 #else 1993 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1994 Register tmp1, Register tmp2, Register tmp3, 1995 XMMRegister xtmp1, XMMRegister xtmp2); 1996 #endif 1997 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 1998 Register in_out, 1999 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 2000 XMMRegister w_xtmp2, 2001 Register tmp1, 2002 Register n_tmp2, Register n_tmp3); 2003 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 2004 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2005 Register tmp1, Register tmp2, 2006 Register n_tmp3); 2007 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 2008 Register in_out1, Register in_out2, Register in_out3, 2009 Register tmp1, Register tmp2, Register tmp3, 2010 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2011 Register tmp4, Register tmp5, 2012 Register n_tmp6); 2013 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 2014 Register tmp1, Register tmp2, Register tmp3, 2015 Register tmp4, Register tmp5, Register tmp6, 2016 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2017 bool is_pclmulqdq_supported); 2018 // Fold 128-bit data chunk 2019 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2020 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 2021 #ifdef _LP64 2022 // Fold 512-bit data chunk 2023 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2024 #endif // _LP64 2025 // Fold 8-bit data 2026 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2027 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2028 2029 // Compress char[] array to byte[]. 2030 void char_array_compress(Register src, Register dst, Register len, 2031 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2032 XMMRegister tmp4, Register tmp5, Register result, 2033 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2034 2035 // Inflate byte[] array to char[]. 
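// Editorial sketch (not part of the original header): the compress helper above and the
// inflate helper below are each meant to be emitted once from the matching intrinsic
// stub, with every temporary supplied by the caller. Assuming '#define __ masm->' and
// hypothetical register bindings for src/dst/len/result:
//
//   __ byte_array_inflate(src, dst, len, xmm1, r10, k2);                          // byte[] -> char[]
//   __ char_array_compress(src, dst, len, xmm1, xmm2, xmm3, xmm4, r10, result,    // char[] -> byte[];
//                          k1, k2);                                               // 'result' is set by the helper
//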
2036 void byte_array_inflate(Register src, Register dst, Register len, 2037 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 2038 2039 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 2040 Register length, Register temp, int vec_enc); 2041 2042 void fill64_masked(uint shift, Register dst, int disp, 2043 XMMRegister xmm, KRegister mask, Register length, 2044 Register temp, bool use64byteVector = false); 2045 2046 void fill32_masked(uint shift, Register dst, int disp, 2047 XMMRegister xmm, KRegister mask, Register length, 2048 Register temp); 2049 2050 void fill32(Address dst, XMMRegister xmm); 2051 2052 void fill32(Register dst, int disp, XMMRegister xmm); 2053 2054 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 2055 2056 void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false); 2057 2058 #ifdef _LP64 2059 void convert_f2i(Register dst, XMMRegister src); 2060 void convert_d2i(Register dst, XMMRegister src); 2061 void convert_f2l(Register dst, XMMRegister src); 2062 void convert_d2l(Register dst, XMMRegister src); 2063 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx); 2064 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx); 2065 2066 void cache_wb(Address line); 2067 void cache_wbsync(bool is_pre); 2068 2069 #ifdef COMPILER2_OR_JVMCI 2070 void generate_fill_avx3(BasicType type, Register to, Register value, 2071 Register count, Register rtmp, XMMRegister xtmp); 2072 #endif // COMPILER2_OR_JVMCI 2073 #endif // _LP64 2074 2075 void vallones(XMMRegister dst, int vector_len); 2076 2077 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); 2078 2079 void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2080 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2081 }; 2082 2083 /** 2084 * class SkipIfEqual: 2085 * 2086 * Instantiating this class emits assembly code that jumps around any code 2087 * emitted between the creation of the instance and its automatic destruction 2088 * at the end of the enclosing scope. Whether the jump is taken depends on the 2089 * value of the flag passed to the constructor, which is checked at run-time. 2090 */ 2091 class SkipIfEqual { 2092 private: 2093 MacroAssembler* _masm; 2094 Label _label; 2095 2096 public: 2097 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch); 2098 ~SkipIfEqual(); 2099 }; 2100 2101 #endif // CPU_X86_MACROASSEMBLER_X86_HPP
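// ------------------------------------------------------------------------------------
// Editorial usage sketch (not part of the original header): SkipIfEqual is a scoped
// guard for code generators. Assuming 'MacroAssembler* masm', the usual
// '#define __ masm->' shorthand, a hypothetical 'bool UseHypotheticalProbes' flag, and
// r10 as a free scratch register on a 64-bit target:
//
//   {
//     SkipIfEqual skip(masm, &UseHypotheticalProbes, false, r10);
//     // Instructions emitted here are jumped over at run time whenever the flag's
//     // value equals the 'value' argument (false here); the destructor binds the
//     // label that the constructor's conditional jump targets.
//     ...                      // emit the guarded code
//   }
// ------------------------------------------------------------------------------------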