/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
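  //
  // Illustrative usage sketch (exposition only; the entry point name is hypothetical):
  // callers normally go through the public call_VM overloads declared further below,
  // which compute the thread and last_Java_sp and then funnel into this base routine, e.g.
  //
  //   __ call_VM(rax,                                            // oop result, if any
  //              CAST_FROM_FN_PTR(address, SomeRuntime::entry),  // VM entry point
  //              rbx);                                           // first argument
  //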
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
           op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  void move32_64(VMRegPair src, VMRegPair dst);
  void long_move(VMRegPair src, VMRegPair dst);
  void float_move(VMRegPair src, VMRegPair dst);
  void double_move(VMRegPair src, VMRegPair dst);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - we save/restore FPU state as a whole always
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
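  //
  // Illustrative wiring sketch (exposition only, not part of this interface; the
  // registers and labels below are hypothetical): the fast path typically branches
  // out on a definite yes/no and falls through into the slow path on "maybe", which
  // then settles the question, e.g.
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure, NULL);
  //   // fall-through here means "maybe-slow"
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, noreg, &L_success, &L_failure);
  //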
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // _LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
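  //
  // Illustrative sketch (exposition only, not part of this interface): the ptr-suffixed
  // wrappers above and below simply pick the natural word width at compile time via
  // LP64_ONLY/NOT_LP64, so one code sequence serves both builds, e.g.
  //
  //   __ addptr(rbx, wordSize);   // emits addq on 64-bit builds, addl on 32-bit builds
  //   __ cmpptr(rax, rbx);        // emits cmpq / cmpl accordingly
  //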
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // _LP64

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register scratchReg = rscratch1);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);
  void gfmul_avx512(XMMRegister ghash, XMMRegister hkey);
  void generateHtbl_48_block_zmm(Register htbl, Register avx512_subkeyHtbl);
  void ghash16_encrypt16_parallel(Register key, Register subkeyHtbl, XMMRegister ctr_blockx,
                                  XMMRegister aad_hashx, Register in, Register out, Register data, Register pos, bool reduction,
                                  XMMRegister addmask, bool no_ghash_input, Register rounds, Register ghash_pos,
                                  bool final_reduction, int index, XMMRegister counter_inc_mask);
 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);
  void aesgcm_encrypt(Register in, Register len, Register ct, Register out, Register key,
                      Register state, Register subkeyHtbl, Register avx512_subkeyHtbl, Register counter);

#endif

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);
  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);

  void kmovwl(KRegister dst, Register src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(Register dst, KRegister src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void kmovwl(Address dst, KRegister src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src)  { Assembler::kmovql(dst, src); }
  void kmovql(Register dst, KRegister src)  { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src)   { Assembler::kmovql(dst, src); }
  void kmovql(Address dst, KRegister src)   { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
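  //
  // Illustrative example (exposition only; the operand choice is hypothetical):
  //
  //   __ kmov(k1, rax);   // emits kmovql when AVX512BW is available, kmovwl otherwise
  //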
  void kmov(Address dst, KRegister src);
  void kmov(KRegister dst, Address src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register dst, KRegister src);
  void kmov(KRegister dst, Register src);

  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg, int vector_len);


  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);

  void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() == src->encoding()) return;
    Assembler::evmovdqul(dst, src, vector_len);
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() == src->encoding() && mask == k0) return;
    Assembler::evmovdqul(dst, mask, src, merge, vector_len);
  }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() == src->encoding()) return;
    Assembler::evmovdquq(dst, src, vector_len);
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() == src->encoding() && mask == k0) return;
    Assembler::evmovdquq(dst, mask, src, merge, vector_len);
  }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);

  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
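  // Note: the AddressLiteral overloads in these groups take a constant operand at a
  // literal address; on 64-bit a scratch register (rscratch1 by default, or the
  // explicit rscratch/scratch_reg parameter) may be needed when the literal is not
  // reachable with rip-relative addressing. Hypothetical usage sketch; the constant
  // table address is an assumption, not something defined in this header:
  //
  //   __ vpaddb(xmm0, xmm0, ExternalAddress(byte_bias_table), Assembler::AVX_256bit, r10);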
  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
  void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
  void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpbroadcastq(dst, src, vector_len); }
  void vpbroadcastq(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastq(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  // Vector compares
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);

  void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
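  // The evpcmp* wrappers above write a per-lane comparison result into an opmask
  // (KRegister) rather than into a vector register. Hedged sketch, assuming AVX-512
  // and an equality predicate constant such as Assembler::eq (registers arbitrary):
  //
  //   __ evpcmpd(k2, k0, xmm0, xmm1, Assembler::eq, /*is_signed*/ true, Assembler::AVX_512bit);
  //   __ evmovdqul(xmm2, k2, Address(rsi, 0), /*merge*/ true, Assembler::AVX_512bit);
  //   // only the lanes that compared equal are overwritten in xmm2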
  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    Assembler::vpmulld(dst, nds, src, vector_len);
  }
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    Assembler::vpmulld(dst, nds, src, vector_len);
  }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
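  // The evpsll*/evpsrl*/evpsra* wrappers below choose between a uniform shift (every
  // lane shifted by the count held in the low bits of 'src') and a variable per-lane
  // shift (each lane shifted by the matching lane of 'src') based on 'is_varshift'.
  // Hedged usage sketch, registers chosen arbitrarily:
  //
  //   __ evpslld(xmm0, k0, xmm0, xmm1, /*merge*/ false, Assembler::AVX_512bit, /*is_varshift*/ true);
  //   // emits evpsllvd: each dword lane of xmm0 is shifted by the matching lane of xmm1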
  void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
    }
  }

  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
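  // Rationale for the vpxor fallback above: plain AVX (UseAVX == 1) has no 256-bit
  // integer vpxor, so for 256-bit operands the bit-identical floating-point form
  // vxorpd is emitted instead; the result has the same bit pattern, at worst with a
  // domain-crossing penalty on some microarchitectures. A sketch of the effect:
  //
  //   __ vpxor(xmm0, xmm0, xmm1, Assembler::AVX_256bit);
  //   // on an AVX-only CPU this actually emits vxorpd ymm0, ymm0, ymm1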
  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }

  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }
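  // Illustrative use of the *_high helpers above: spill and reload the upper 128-bit
  // lane of a YMM register around some other code. This is only a sketch; it assumes
  // scratch space at the top of the stack and is not a required calling sequence:
  //
  //   __ vextracti128_high(Address(rsp, 0), xmm6);   // save upper lane of ymm6
  //   // ... code that may clobber the upper lane ...
  //   __ vinserti128_high(xmm6, Address(rsp, 0));    // restore upper lane of ymm6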
  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }

  // AVX-512 mask operations.
  void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
  void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
  void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void kortest(uint masklen, KRegister src1, KRegister src2);
  void ktest(uint masklen, KRegister src1, KRegister src2);

  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);

  void alltrue(Register dst, uint masklen, KRegister src1, KRegister src2, KRegister kscratch);
  void anytrue(Register dst, uint masklen, KRegister src, KRegister kscratch);

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
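  // cmovptr emits a pointer-width conditional move (cmovq on 64-bit, cmov32 on 32-bit),
  // which is handy for branchless selects. Hedged sketch, registers chosen arbitrarily:
  //
  //   __ cmpptr(rax, rbx);
  //   __ cmovptr(Assembler::above, rax, rbx);   // rax = min(rax, rbx), treated as unsigned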
  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used.
  // They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign-extend a 32-bit ('l') value to a ptr-sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
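  // Example of the intent behind movl2ptr: widening a 32-bit Java value (such as an
  // array length) into a register that will be used for pointer arithmetic. A hedged
  // sketch; the offset helper is just an example of a plausible caller:
  //
  //   __ movl2ptr(rbx, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
  //   // on LP64 this emits movslq, so the value is sign-extended to 64 bits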
 public:
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);

  // clear memory initialization sequence for constant size;
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
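  // The multiply/square helpers above are the building blocks used by the BigInteger
  // intrinsics (multiplyToLen, squareToLen, mulAdd) on x86_64; the usual entry points
  // are multiply_to_len(), square_to_len() and mul_add(), with the *_bmi2 loop variants
  // selected by CPU feature checks in the implementation file. This is a summary of
  // intent, not a normative list of callers.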
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
  void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
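  // The fold_*_crc32 helpers follow the usual carry-less-multiplication CRC scheme:
  // wide chunks of the input are folded into a 128-bit accumulator with pclmulqdq
  // (fold_128bit_crc32 / fold512bit_crc32_avx512) and the trailing bytes are finished
  // with the table-driven fold_8bit_crc32. This comment is only a summary of the
  // approach; the folding constants and the final reduction live in the implementation file.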
  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#if COMPILER2_OR_JVMCI
  void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
                                    Register to, Register count, int shift,
                                    Register index, Register temp,
                                    bool use64byteVector, Label& L_entry, Label& L_exit);

  void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
                                             Register to, Register start_index, Register end_index,
                                             Register count, int shift, Register temp,
                                             bool use64byteVector, Label& L_entry, Label& L_exit);

  void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0,
                         bool use64byteVector = false);

  void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0);

  void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  int shift = Address::times_1, int offset = 0);

  void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  bool conjoint, int shift = Address::times_1, int offset = 0,
                  bool use64byteVector = false);

  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);

#endif // COMPILER2_OR_JVMCI

#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
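// Typical (hedged) usage of SkipIfEqual: guard optional instrumentation behind a
// runtime bool flag without managing an explicit Label in the caller. The flag name
// below is only an example, not something defined in this header:
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here is jumped over when the flag reads false at run-time
//   }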

#endif // CPU_X86_MACROASSEMBLER_X86_HPP