/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int number_of_arguments     // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
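  //
  // Illustrative sketch (an assumption for exposition, not taken from this interface):
  // a stub that lets call_VM_base compute the thread and use rsp as last_java_sp, with
  // `SomeRuntime::entry` standing in for a real VM entry point:
  //
  //   call_VM_base(rax,     // oop result
  //                noreg,   // java_thread: compute it here
  //                noreg,   // last_java_sp: use rsp
  //                CAST_FROM_FN_PTR(address, SomeRuntime::entry),
  //                1,       // one argument (without the thread)
  //                true);   // check for pending exceptions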
  virtual void call_VM_base(       // returns the register containing the thread upon return
    Register oop_result,           // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,          // the thread if computed before; use noreg otherwise
    Register last_java_sp,         // to set up last_Java_frame in stubs; use noreg otherwise
    address entry_point,           // the entry point
    int number_of_arguments,       // the number of arguments (w/o thread) to pop after the call
    bool check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
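  //
  // Illustrative sketch (assumption, not part of this interface): a typical upcall
  // with one register argument and an oop result, using the default exception check;
  // `InterpreterRuntime::some_entry` is a placeholder name for a real VM entry point:
  //
  //   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry), rbx);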

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
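  // Illustrative sketch (assumption, not from this file): a GC-barrier-aware oop
  // load and store through the helpers above, with default decorators; the field
  // offset and register choices are placeholders:
  //
  //   load_heap_oop(rax, Address(rsi, field_offset));
  //   store_heap_oop(Address(rdi, field_offset), rax, rscratch1, r10, r8);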

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified.
  // If pop_right is specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - always save/restore FPU state as a whole
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register thread,              // Current thread
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                  // temp register
    Register t2,                  // temp register
    Label& slow_case              // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.
  //
  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
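  //
  // Illustrative sketch (assumption): the usual pairing of the fast and slow path,
  // falling through from the fast path into the slow path; register choices are
  // placeholders:
  //
  //   Label L_ok, L_bad;
  //   check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_bad, nullptr);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_ok, &L_bad);
  //   bind(L_bad);  // not a subtype
  //   ...
  //   bind(L_ok);   // is a subtype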
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table(Register sub_klass,
                                     Register super_klass,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3,
                                     Register temp4,
                                     Register result,
                                     u1 super_klass_slot);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
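  //
  // Illustrative sketch (assumption, not from this file): the pointer-sized and
  // AddressLiteral-aware wrappers keep one code sequence correct on both word sizes;
  // `counter_addr` is a placeholder external address:
  //
  //   addptr(rsp, 2 * wordSize);                             // addq on LP64, addl on 32-bit
  //   cmp32(rax, ExternalAddress(counter_addr), rscratch1);  // reachable directly or via scratch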

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
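  //
  // Illustrative note (assumption): without the using-declaration below, the testl
  // overloads declared here would hide all inherited Assembler::testl overloads, so a
  // register-register form such as
  //   testl(rax, rbx);
  // would no longer resolve. The same reasoning applies to testq.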
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src)   { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src)   { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32)  { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32)  { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2)  { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32)   { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src)  { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class.
  // This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true)   { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true)  { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true)   { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true)  { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true)   { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true)   { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true)  { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true)  { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true)   { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true)   { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true)  { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true)  { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true)  { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true)  { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true)  { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true)   { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true)   { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true)  { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true)  { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true)  { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true)  { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true)   { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true)   { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true)  { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true)  { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true)  { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L)   { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L)  { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L)   { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L)  { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L)   { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L)   { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L)  { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L)  { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L)   { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L)   { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L)  { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L)  { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L)  { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L)  { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L)  { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L)   { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L)   { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L)  { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L)  { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L)  { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L)  { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L)   { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L)   { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L)  { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L)  { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L)  { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L)  { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src)        { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,   /* ymm6 */
    XMMRegister xmm_3,   /* ymm7 */
    Register    reg_a,   /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,   /* ebx */  /* full cycle is 8 iterations */
    Register    reg_c,   /* edi */
    Register    reg_d,   /* esi */
    Register    reg_e,   /* r8d */
    Register    reg_f,   /* r9d */
    Register    reg_g,   /* r10d */
    Register    reg_h,   /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src) { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register dst, KRegister src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(Address dst, KRegister src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src)  { Assembler::kmovql(dst, src); }
  void kmovql(Register dst, KRegister src)  { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src)   { Assembler::kmovql(dst, src); }
  void kmovql(Address dst, KRegister src)   { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and
64bit moves for targets supporting AVX512BW feature. 1259 void kmov(Address dst, KRegister src); 1260 void kmov(KRegister dst, Address src); 1261 void kmov(KRegister dst, KRegister src); 1262 void kmov(Register dst, KRegister src); 1263 void kmov(KRegister dst, Register src); 1264 1265 using Assembler::movddup; 1266 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1267 1268 using Assembler::vmovddup; 1269 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1270 1271 // AVX Unaligned forms 1272 void vmovdqu(Address dst, XMMRegister src); 1273 void vmovdqu(XMMRegister dst, Address src); 1274 void vmovdqu(XMMRegister dst, XMMRegister src); 1275 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1276 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1277 1278 // AVX512 Unaligned 1279 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); 1280 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len); 1281 1282 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1283 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1284 1285 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1286 if (dst->encoding() != src->encoding() || mask != k0) { 1287 Assembler::evmovdqub(dst, mask, src, merge, vector_len); 1288 } 1289 } 1290 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1291 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1292 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1293 1294 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1295 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1296 1297 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1298 if (dst->encoding() != src->encoding() || mask != k0) { 1299 Assembler::evmovdquw(dst, mask, src, merge, vector_len); 1300 } 1301 } 1302 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1303 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1304 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1305 1306 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1307 if (dst->encoding() != src->encoding()) { 1308 Assembler::evmovdqul(dst, src, vector_len); 1309 } 1310 } 1311 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1312 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1313 1314 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1315 if (dst->encoding() != src->encoding() || mask 
!= k0) { 1316 Assembler::evmovdqul(dst, mask, src, merge, vector_len); 1317 } 1318 } 1319 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1320 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1321 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1322 1323 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 1324 if (dst->encoding() != src->encoding()) { 1325 Assembler::evmovdquq(dst, src, vector_len); 1326 } 1327 } 1328 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1329 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1330 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1331 1332 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1333 if (dst->encoding() != src->encoding() || mask != k0) { 1334 Assembler::evmovdquq(dst, mask, src, merge, vector_len); 1335 } 1336 } 1337 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1338 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1339 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1340 1341 // Move Aligned Double Quadword 1342 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1343 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1344 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1345 1346 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1347 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1348 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1349 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1350 1351 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } 1352 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1353 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1354 1355 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1356 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1357 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1358 1359 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 1360 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1361 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1362 1363 // Carry-Less Multiplication Quadword 1364 void pclmulldq(XMMRegister dst, XMMRegister src) { 1365 // 0x00 - multiply lower 64 bits [0:63] 1366 Assembler::pclmulqdq(dst, src, 0x00); 1367 } 1368 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1369 // 0x11 - multiply upper 64 bits [64:127] 1370 Assembler::pclmulqdq(dst, src, 0x11); 1371 } 1372 1373 void pcmpeqb(XMMRegister dst, XMMRegister src); 1374 void pcmpeqw(XMMRegister dst, XMMRegister src); 1375 1376 void 
pcmpestri(XMMRegister dst, Address src, int imm8); 1377 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1378 1379 void pmovzxbw(XMMRegister dst, XMMRegister src); 1380 void pmovzxbw(XMMRegister dst, Address src); 1381 1382 void pmovmskb(Register dst, XMMRegister src); 1383 1384 void ptest(XMMRegister dst, XMMRegister src); 1385 1386 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1387 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1388 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1389 1390 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1391 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1392 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1393 1394 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1395 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1396 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1397 1398 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1399 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1400 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1401 1402 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1403 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1404 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1405 1406 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1407 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1408 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1409 1410 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1411 void xorpd(XMMRegister dst, XMMRegister src); 1412 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1413 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1414 1415 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1416 void xorps(XMMRegister dst, XMMRegister src); 1417 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1418 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1419 1420 // Shuffle Bytes 1421 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1422 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1423 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1424 // AVX 3-operands instructions 1425 1426 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1427 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1428 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1429 1430 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1431 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1432 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1433 1434 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register 
rscratch = noreg); 1435 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1436 1437 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1438 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1439 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1440 1441 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1442 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1443 1444 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1445 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1446 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1447 1448 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1449 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1450 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1451 1452 using Assembler::vpbroadcastd; 1453 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1454 1455 using Assembler::vpbroadcastq; 1456 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1457 1458 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1459 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len); 1460 1461 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1462 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1463 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1464 1465 // Vector compares 1466 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1467 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1468 } 1469 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1470 1471 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1472 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1473 } 1474 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1475 1476 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1477 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1478 } 1479 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1480 1481 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1482 Assembler::evpcmpw(kdst, mask, nds, src, comparison, 
is_signed, vector_len); 1483 } 1484 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1485 1486 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1487 1488 // Emit comparison instruction for the specified comparison predicate. 1489 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1490 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1491 1492 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1493 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1494 1495 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1496 1497 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1498 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1499 1500 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1501 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1502 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1503 1504 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1505 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1506 1507 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1508 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1509 1510 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1511 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1512 1513 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1514 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1515 1516 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1517 if (!is_varshift) { 1518 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1519 } else { 1520 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1521 } 1522 } 1523 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1524 if (!is_varshift) { 1525 Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1526 } else { 1527 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1528 } 1529 } 1530 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1531 if (!is_varshift) { 1532 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1533 } else { 1534 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1535 } 1536 } 1537 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1538 if (!is_varshift) { 1539 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1540 } else { 1541 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1542 } 1543 } 1544 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool 
is_varshift) { 1545 if (!is_varshift) { 1546 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1547 } else { 1548 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1549 } 1550 } 1551 1552 using Assembler::evpsrlq; 1553 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1554 if (!is_varshift) { 1555 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1556 } else { 1557 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1558 } 1559 } 1560 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1561 if (!is_varshift) { 1562 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1563 } else { 1564 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1565 } 1566 } 1567 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1568 if (!is_varshift) { 1569 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1570 } else { 1571 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1572 } 1573 } 1574 using Assembler::evpsraq; 1575 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1576 if (!is_varshift) { 1577 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1578 } else { 1579 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1580 } 1581 } 1582 1583 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1584 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1585 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1586 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1587 1588 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1589 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1590 1591 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1592 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1593 1594 void vptest(XMMRegister dst, XMMRegister src); 1595 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1596 1597 void punpcklbw(XMMRegister dst, XMMRegister src); 1598 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1599 1600 void pshufd(XMMRegister dst, Address src, int mode); 1601 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1602 1603 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1604 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1605 1606 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1607 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1608 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1609 1610 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, 
vector_len); } 1611 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1612 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1613 1614 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1615 1616 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1617 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1618 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1619 1620 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1621 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1622 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1623 1624 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1625 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1626 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1627 1628 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1629 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1630 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1631 1632 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1633 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1634 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1635 1636 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1637 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1638 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1639 1640 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1641 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1642 1643 // AVX Vector instructions 1644 1645 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1646 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1647 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1648 1649 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1650 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1651 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1652 1653 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1654 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1655 Assembler::vpxor(dst, nds, src, vector_len); 1656 else 1657 
Assembler::vxorpd(dst, nds, src, vector_len); 1658 } 1659 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1660 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1661 Assembler::vpxor(dst, nds, src, vector_len); 1662 else 1663 Assembler::vxorpd(dst, nds, src, vector_len); 1664 } 1665 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1666 1667 // Simple version for AVX2 256bit vectors 1668 void vpxor(XMMRegister dst, XMMRegister src) { 1669 assert(UseAVX >= 2, "Should be at least AVX2"); 1670 Assembler::vpxor(dst, dst, src, AVX_256bit); 1671 } 1672 void vpxor(XMMRegister dst, Address src) { 1673 assert(UseAVX >= 2, "Should be at least AVX2"); 1674 Assembler::vpxor(dst, dst, src, AVX_256bit); 1675 } 1676 1677 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1678 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1679 1680 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1681 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1682 Assembler::vinserti32x4(dst, nds, src, imm8); 1683 } else if (UseAVX > 1) { 1684 // vinserti128 is available only in AVX2 1685 Assembler::vinserti128(dst, nds, src, imm8); 1686 } else { 1687 Assembler::vinsertf128(dst, nds, src, imm8); 1688 } 1689 } 1690 1691 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1692 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1693 Assembler::vinserti32x4(dst, nds, src, imm8); 1694 } else if (UseAVX > 1) { 1695 // vinserti128 is available only in AVX2 1696 Assembler::vinserti128(dst, nds, src, imm8); 1697 } else { 1698 Assembler::vinsertf128(dst, nds, src, imm8); 1699 } 1700 } 1701 1702 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1703 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1704 Assembler::vextracti32x4(dst, src, imm8); 1705 } else if (UseAVX > 1) { 1706 // vextracti128 is available only in AVX2 1707 Assembler::vextracti128(dst, src, imm8); 1708 } else { 1709 Assembler::vextractf128(dst, src, imm8); 1710 } 1711 } 1712 1713 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1714 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1715 Assembler::vextracti32x4(dst, src, imm8); 1716 } else if (UseAVX > 1) { 1717 // vextracti128 is available only in AVX2 1718 Assembler::vextracti128(dst, src, imm8); 1719 } else { 1720 Assembler::vextractf128(dst, src, imm8); 1721 } 1722 } 1723 1724 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1725 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1726 vinserti128(dst, dst, src, 1); 1727 } 1728 void vinserti128_high(XMMRegister dst, Address src) { 1729 vinserti128(dst, dst, src, 1); 1730 } 1731 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1732 vextracti128(dst, src, 1); 1733 } 1734 void vextracti128_high(Address dst, XMMRegister src) { 1735 vextracti128(dst, src, 1); 1736 } 1737 1738 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1739 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1740 Assembler::vinsertf32x4(dst, dst, src, 1); 1741 } else { 1742 Assembler::vinsertf128(dst, dst, src, 1); 1743 } 1744 } 1745 1746 void vinsertf128_high(XMMRegister dst, Address src) { 1747 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1748 Assembler::vinsertf32x4(dst, dst, 
src, 1); 1749 } else { 1750 Assembler::vinsertf128(dst, dst, src, 1); 1751 } 1752 } 1753 1754 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1755 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1756 Assembler::vextractf32x4(dst, src, 1); 1757 } else { 1758 Assembler::vextractf128(dst, src, 1); 1759 } 1760 } 1761 1762 void vextractf128_high(Address dst, XMMRegister src) { 1763 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1764 Assembler::vextractf32x4(dst, src, 1); 1765 } else { 1766 Assembler::vextractf128(dst, src, 1); 1767 } 1768 } 1769 1770 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1771 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1772 Assembler::vinserti64x4(dst, dst, src, 1); 1773 } 1774 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1775 Assembler::vinsertf64x4(dst, dst, src, 1); 1776 } 1777 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1778 Assembler::vextracti64x4(dst, src, 1); 1779 } 1780 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1781 Assembler::vextractf64x4(dst, src, 1); 1782 } 1783 void vextractf64x4_high(Address dst, XMMRegister src) { 1784 Assembler::vextractf64x4(dst, src, 1); 1785 } 1786 void vinsertf64x4_high(XMMRegister dst, Address src) { 1787 Assembler::vinsertf64x4(dst, dst, src, 1); 1788 } 1789 1790 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1791 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1792 vinserti128(dst, dst, src, 0); 1793 } 1794 void vinserti128_low(XMMRegister dst, Address src) { 1795 vinserti128(dst, dst, src, 0); 1796 } 1797 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1798 vextracti128(dst, src, 0); 1799 } 1800 void vextracti128_low(Address dst, XMMRegister src) { 1801 vextracti128(dst, src, 0); 1802 } 1803 1804 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1805 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1806 Assembler::vinsertf32x4(dst, dst, src, 0); 1807 } else { 1808 Assembler::vinsertf128(dst, dst, src, 0); 1809 } 1810 } 1811 1812 void vinsertf128_low(XMMRegister dst, Address src) { 1813 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1814 Assembler::vinsertf32x4(dst, dst, src, 0); 1815 } else { 1816 Assembler::vinsertf128(dst, dst, src, 0); 1817 } 1818 } 1819 1820 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1821 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1822 Assembler::vextractf32x4(dst, src, 0); 1823 } else { 1824 Assembler::vextractf128(dst, src, 0); 1825 } 1826 } 1827 1828 void vextractf128_low(Address dst, XMMRegister src) { 1829 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1830 Assembler::vextractf32x4(dst, src, 0); 1831 } else { 1832 Assembler::vextractf128(dst, src, 0); 1833 } 1834 } 1835 1836 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1837 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1838 Assembler::vinserti64x4(dst, dst, src, 0); 1839 } 1840 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1841 Assembler::vinsertf64x4(dst, dst, src, 0); 1842 } 1843 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1844 Assembler::vextracti64x4(dst, src, 0); 1845 } 1846 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1847 Assembler::vextractf64x4(dst, src, 0); 1848 } 1849 void vextractf64x4_low(Address dst, XMMRegister src) { 1850 Assembler::vextractf64x4(dst, src, 0); 1851 } 1852 void vinsertf64x4_low(XMMRegister dst, Address src) { 1853 
Assembler::vinsertf64x4(dst, dst, src, 0); 1854 } 1855 1856 // Carry-Less Multiplication Quadword 1857 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1858 // 0x00 - multiply lower 64 bits [0:63] 1859 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1860 } 1861 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1862 // 0x11 - multiply upper 64 bits [64:127] 1863 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1864 } 1865 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1866 // 0x10 - multiply nds[0:63] and src[64:127] 1867 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1868 } 1869 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1870 //0x01 - multiply nds[64:127] and src[0:63] 1871 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1872 } 1873 1874 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1875 // 0x00 - multiply lower 64 bits [0:63] 1876 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1877 } 1878 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1879 // 0x11 - multiply upper 64 bits [64:127] 1880 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1881 } 1882 1883 // AVX-512 mask operations. 1884 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 1885 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1886 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 1887 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1888 void kortest(uint masklen, KRegister src1, KRegister src2); 1889 void ktest(uint masklen, KRegister src1, KRegister src2); 1890 1891 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1892 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1893 1894 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1895 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1896 1897 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1898 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1899 1900 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1901 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1902 1903 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1904 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1905 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1906 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1907 1908 using Assembler::evpandq; 1909 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1910 1911 using Assembler::evpaddq; 1912 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 
bool merge, int vector_len, Register rscratch = noreg); 1913 1914 using Assembler::evporq; 1915 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1916 1917 using Assembler::vpshufb; 1918 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1919 1920 using Assembler::vpor; 1921 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1922 1923 using Assembler::vpternlogq; 1924 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 1925 1926 void cmov32( Condition cc, Register dst, Address src); 1927 void cmov32( Condition cc, Register dst, Register src); 1928 1929 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1930 1931 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1932 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1933 1934 void movoop(Register dst, jobject obj); 1935 void movoop(Address dst, jobject obj, Register rscratch); 1936 1937 void mov_metadata(Register dst, Metadata* obj); 1938 void mov_metadata(Address dst, Metadata* obj, Register rscratch); 1939 1940 void movptr(Register dst, Register src); 1941 void movptr(Register dst, Address src); 1942 void movptr(Register dst, AddressLiteral src); 1943 void movptr(Register dst, ArrayAddress src); 1944 void movptr(Register dst, intptr_t src); 1945 void movptr(Address dst, Register src); 1946 void movptr(Address dst, int32_t imm); 1947 void movptr(Address dst, intptr_t src, Register rscratch); 1948 void movptr(ArrayAddress dst, Register src, Register rscratch); 1949 1950 void movptr(Register dst, RegisterOrConstant src) { 1951 if (src.is_constant()) movptr(dst, src.as_constant()); 1952 else movptr(dst, src.as_register()); 1953 } 1954 1955 1956 // to avoid hiding movl 1957 void mov32(Register dst, AddressLiteral src); 1958 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 1959 1960 // Import other mov() methods from the parent class or else 1961 // they will be hidden by the following overriding declaration. 
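  // (Illustrative note, not part of the original source.) The hiding mentioned above
  // follows the usual C++ rule: declaring any overload of a name in a derived class
  // hides all base-class overloads of that name unless they are re-imported. A minimal
  // sketch with hypothetical types:
  //
  //   struct Base            { void mov(int); void mov(long); };
  //   struct Hides   : Base  { void mov(double); };                  // Base::mov(int/long) hidden
  //   struct Imports : Base  { using Base::mov; void mov(double); }; // all overloads visible
  //
  // Hence the 'using Assembler::xxx;' declarations that precede the AddressLiteral
  // overloads below.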
1962 using Assembler::movdl; 1963 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1964 1965 using Assembler::movq; 1966 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1967 1968 // Can push a value or an effective address 1969 void pushptr(AddressLiteral src, Register rscratch); 1970 1971 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 1972 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 1973 1974 void pushoop(jobject obj, Register rscratch); 1975 void pushklass(Metadata* obj, Register rscratch); 1976 1977 // sign extend an int (l) to a ptr sized element as needed 1978 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 1979 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 1980 1981 1982 public: 1983 // clear memory of size 'cnt' qwords, starting at 'base'; 1984 // if 'is_large' is set, do not try to produce short loop 1985 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg); 1986 1987 // clear memory initialization sequence for constant size; 1988 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1989 1990 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 1991 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1992 1993 // Fill primitive arrays 1994 void generate_fill(BasicType t, bool aligned, 1995 Register to, Register value, Register count, 1996 Register rtmp, XMMRegister xtmp); 1997 1998 void encode_iso_array(Register src, Register dst, Register len, 1999 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2000 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 2001 2002 #ifdef _LP64 2003 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 2004 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 2005 Register y, Register y_idx, Register z, 2006 Register carry, Register product, 2007 Register idx, Register kdx); 2008 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 2009 Register yz_idx, Register idx, 2010 Register carry, Register product, int offset); 2011 void multiply_128_x_128_bmi2_loop(Register y, Register z, 2012 Register carry, Register carry2, 2013 Register idx, Register jdx, 2014 Register yz_idx1, Register yz_idx2, 2015 Register tmp, Register tmp3, Register tmp4); 2016 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 2017 Register yz_idx, Register idx, Register jdx, 2018 Register carry, Register product, 2019 Register carry2); 2020 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 2021 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 2022 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 2023 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2024 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 2025 Register tmp2); 2026 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 2027 Register rdxReg, Register raxReg); 2028 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 2029 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1,
Register tmp2, 2030 Register tmp3, Register tmp4); 2031 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2032 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2033 2034 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 2035 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2036 Register raxReg); 2037 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 2038 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2039 Register raxReg); 2040 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 2041 Register result, Register tmp1, Register tmp2, 2042 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 2043 #endif 2044 2045 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 2046 void update_byte_crc32(Register crc, Register val, Register table); 2047 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 2048 2049 2050 #ifdef _LP64 2051 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 2052 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 2053 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 2054 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 2055 #endif // _LP64 2056 2057 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 2058 // Note on a naming convention: 2059 // Prefix w = register only used on a Westmere+ architecture 2060 // Prefix n = register only used on a Nehalem architecture 2061 #ifdef _LP64 2062 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2063 Register tmp1, Register tmp2, Register tmp3); 2064 #else 2065 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2066 Register tmp1, Register tmp2, Register tmp3, 2067 XMMRegister xtmp1, XMMRegister xtmp2); 2068 #endif 2069 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 2070 Register in_out, 2071 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 2072 XMMRegister w_xtmp2, 2073 Register tmp1, 2074 Register n_tmp2, Register n_tmp3); 2075 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 2076 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2077 Register tmp1, Register tmp2, 2078 Register n_tmp3); 2079 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 2080 Register in_out1, Register in_out2, Register in_out3, 2081 Register tmp1, Register tmp2, Register tmp3, 2082 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2083 Register tmp4, Register tmp5, 2084 Register n_tmp6); 2085 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 2086 Register tmp1, Register tmp2, Register tmp3, 2087 Register tmp4, Register tmp5, Register tmp6, 2088 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2089 bool is_pclmulqdq_supported); 2090 // Fold 128-bit data chunk 2091 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2092 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister 
xbuf); 2093 #ifdef _LP64 2094 // Fold 512-bit data chunk 2095 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2096 #endif // _LP64 2097 // Fold 8-bit data 2098 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2099 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2100 2101 // Compress char[] array to byte[]. 2102 void char_array_compress(Register src, Register dst, Register len, 2103 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2104 XMMRegister tmp4, Register tmp5, Register result, 2105 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2106 2107 // Inflate byte[] array to char[]. 2108 void byte_array_inflate(Register src, Register dst, Register len, 2109 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 2110 2111 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 2112 Register length, Register temp, int vec_enc); 2113 2114 void fill64_masked(uint shift, Register dst, int disp, 2115 XMMRegister xmm, KRegister mask, Register length, 2116 Register temp, bool use64byteVector = false); 2117 2118 void fill32_masked(uint shift, Register dst, int disp, 2119 XMMRegister xmm, KRegister mask, Register length, 2120 Register temp); 2121 2122 void fill32(Address dst, XMMRegister xmm); 2123 2124 void fill32(Register dst, int disp, XMMRegister xmm); 2125 2126 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 2127 2128 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false); 2129 2130 #ifdef _LP64 2131 void convert_f2i(Register dst, XMMRegister src); 2132 void convert_d2i(Register dst, XMMRegister src); 2133 void convert_f2l(Register dst, XMMRegister src); 2134 void convert_d2l(Register dst, XMMRegister src); 2135 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx); 2136 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx); 2137 2138 void cache_wb(Address line); 2139 void cache_wbsync(bool is_pre); 2140 2141 #ifdef COMPILER2_OR_JVMCI 2142 void generate_fill_avx3(BasicType type, Register to, Register value, 2143 Register count, Register rtmp, XMMRegister xtmp); 2144 #endif // COMPILER2_OR_JVMCI 2145 #endif // _LP64 2146 2147 void vallones(XMMRegister dst, int vector_len); 2148 2149 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); 2150 2151 void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2152 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2153 2154 #ifdef _LP64 2155 void save_legacy_gprs(); 2156 void restore_legacy_gprs(); 2157 void setcc(Assembler::Condition comparison, Register dst); 2158 #endif 2159 }; 2160 2161 /** 2162 * class SkipIfEqual: 2163 * 2164 * Instantiating this class will result in assembly code being output that will 2165 * jump around any code emitted between the creation of the instance and its 2166 * automatic destruction at the end of a scope block, depending on the value of 2167 * the flag passed to the constructor, which is checked at run-time. 2168 */ 2169 class SkipIfEqual { 2170 private: 2171 MacroAssembler* _masm; 2172 Label _label; 2173 2174 public: 2175 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch); 2176 ~SkipIfEqual(); 2177 }; 2178 2179 #endif // CPU_X86_MACROASSEMBLER_X86_HPP
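// Usage sketch for SkipIfEqual (illustrative, not part of the original source): the
// class is meant to be instantiated on the stack around conditionally executed code;
// the flag name and scratch register below are hypothetical.
//
//   {
//     SkipIfEqual skip(masm, &SomeProbeFlag, /*value=*/false, rscratch);
//     // ... instructions emitted here are jumped over at run-time
//     // ... whenever *flag_addr == false
//   } // the destructor binds the skip-target label here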