/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // call_VM_base returns the register which contains the thread upon return.
  // If no last_java_sp is specified (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
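
  // A minimal construction sketch (illustrative only; the stub name and buffer
  // sizes are placeholders, not values used anywhere in this file): stub
  // generators typically allocate a CodeBuffer and wrap it in a MacroAssembler
  // before emitting code:
  //
  //   CodeBuffer code("example_stub", code_size, locs_size);
  //   MacroAssembler masm(&code);
  //   masm.enter();
  //   // ... emit the stub body ...
  //   masm.leave();
  //   masm.ret(0);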

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe();
  virtual void check_and_handle_earlyret();

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
           (op == 0x8D) /* lea */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { incrementq(reg, value); }
  void decrement(Register reg, int value = 1) { decrementq(reg, value); }
  void increment(Address dst, int value = 1)  { incrementq(dst, value); }
  void decrement(Address dst, int value = 1)  { decrementq(dst, value); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);
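
  // Illustrative note (not a guarantee of exact encodings): these helpers pick
  // between inc/dec and add/sub forms based on the value and CPU flags, e.g.
  // increment(rbx) may emit incq(rbx) when UseIncDec is on, while
  // increment(rbx, 8) emits an addq-style form.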

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register. This is a slow version that does a native call.
  // Normally, the JavaThread pointer is available in r15_thread; use that where possible.
  void get_thread_slow(Register thread);

  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
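
  // A hedged usage sketch (the runtime entry and argument register below are
  // placeholders, not routines declared in this file): code that needs an oop
  // result back from the VM typically writes
  //
  //   call_VM(rax,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::example_entry),  // hypothetical entry
  //           rbx /* arg_1 */);
  //
  // With check_exceptions left at its default of true, a pending exception
  // raised during the VM call is forwarded when the call returns.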

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result_oop(Register oop_result);
  void get_vm_result_metadata(Register metadata_result);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label&   last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(bool clear_fp);
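
  // Illustrative pairing (register and label choices are placeholders): code
  // that leaves Java to run VM or native code typically brackets the
  // transition roughly as
  //
  //   Label the_pc;  // describes the return pc of the outgoing call
  //   set_last_Java_frame(rsp, noreg, the_pc, rscratch1);
  //   // ... make the call ...
  //   reset_last_Java_frame(true /* clear_fp */);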

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register tmp);
  void resolve_global_jobject(Register value, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_narrow_klass_compact(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the Klass pointer of an object to a given Klass (which might be narrow,
  // depending on UseCompressedClassPointers).
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
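
  // Illustrative sketch (the field offset is a placeholder): decorators let
  // the GC barrier code specialize the access, e.g. an oop field known to be
  // non-null can be loaded as
  //
  //   load_heap_oop(rax, Address(rbx, example_offset), rscratch1, IS_NOT_NULL);
  //
  // store_heap_oop takes the value register plus up to three temps that the
  // selected barrier set may use.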

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // If the heap base register is used, reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - we save/restore FPU state as a whole always
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register obj,                   // result: pointer to object after successful allocation
    Register var_size_in_bytes,     // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,     // object size in bytes if known at compile time
    Register t1,                    // temp register
    Register t2,                    // temp register
    Label&   slow_case              // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);
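
  // Typical pairing of the two halves (an illustrative sketch only; the
  // register names are placeholders, not the exact code shape used by callers
  // in this repository):
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(r_sub, r_super, r_tmp, &L_success, &L_failure, nullptr);
  //   // fall-through means the fast path could not decide
  //   check_klass_subtype_slow_path(r_sub, r_super, r_tmp, noreg, &L_success, nullptr);
  //   // fall-through here means failure
  //   bind(L_failure);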

  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);

  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);
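
  // Illustrative sketch (the slow-path code is elided): a return path might
  // poll for a pending safepoint/handshake with
  //
  //   Label slow_path;
  //   safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
  //   // ... normal return ...
  //   bind(slow_path);
  //   // call into the VM's safepoint handling from here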

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { addq(dst, src); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { addq(dst, src); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { andq(src1, src2); }

  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
  void cmpptr(Register src1, Address src2) { cmpq(src1, src2); }

  void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
  void cmpptr(Address src1, int32_t src2) { cmpq(src1, src2); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { imulq(dst, src); }
  void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }


  void negptr(Register dst) { negq(dst); }

  void notptr(Register dst) { notq(dst); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { shlq(dst); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { shrq(dst); }

  void sarptr(Register dst) { sarq(dst); }
  void sarptr(Register dst, int32_t src) { sarq(dst, src); }

  void subptr(Address dst, int32_t src) { subq(dst, src); }

  void subptr(Register dst, Address src) { subq(dst, src); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
  void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }

  void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
  void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }

  void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }


  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
  void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address  dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { orq(dst, src); }
  void orptr(Register dst, Register src) { orq(dst, src); }
  void orptr(Register dst, int32_t src) { orq(dst, src); }
  void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }

  void testptr(Register src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Address src2) { testq(src1, src2); }
  void testptr(Address src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { xorq(dst, src); }
  void xorptr(Register dst, Address src) { xorq(dst, src); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
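
  // Illustrative use: tail-jump to a stub whose address is known when the
  // code is emitted, e.g.
  //
  //   jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  //
  // Since the operand is an AddressLiteral, the macro can emit a direct jmp
  // when the target is reachable and otherwise materialize the address
  // (using rscratch when provided).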

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }

  void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,  /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,  /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,  /* ymm6 */
    XMMRegister xmm_3,  /* ymm7 */
    Register reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register reg_b,     /* ebx */   /* full cycle is 8 iterations */
    Register reg_c,     /* edi */
    Register reg_d,     /* esi */
    Register reg_e,     /* r8d */
    Register reg_f,     /* r9d */
    Register reg_g,     /* r10d */
    Register reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);

 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
  void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcasti128;
  void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register  dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address   dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register  src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address   src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register  src) { Assembler::kmovql(dst, src); }
  void kmovql(Register  dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address   src) { Assembler::kmovql(dst, src); }
  void kmovql(Address   dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
  void kmov(Address   dst, KRegister src);
  void kmov(KRegister dst, Address   src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register  dst, KRegister src);
  void kmov(KRegister dst, Register  src);

  using Assembler::movddup;
  void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, XMMRegister src, int vector_len);
  void vmovdqu(XMMRegister dst, Address src, int vector_len);
  void vmovdqu(Address dst, XMMRegister src, int vector_len);

  // AVX Aligned forms
  using Assembler::vmovdqa;
  void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);

  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
  void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }

  void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqub(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }

  void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquw(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdqul(dst, src, vector_len);
    }
  }
  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }

  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::movapd;
  void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastd;
  void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastq;
  void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
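
  // Illustrative usage only: broadcasting a 32-bit constant from the code
  // cache into every lane, with a scratch register in case the constant is out
  // of rip-relative range ('four_addr' is a hypothetical constant address used
  // just for this sketch):
  //
  //   __ vpbroadcastd(xmm3, ExternalAddress(four_addr), Assembler::AVX_256bit, r10);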

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector compares
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);
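
  // Illustrative usage only: the AVX-512 compares above write one bit per lane
  // into an opmask register instead of a vector.  Assuming Assembler::eq as the
  // comparison constant, a signed dword compare of two 512-bit vectors could be
  //
  //   __ evpcmpd(k2, k0, xmm0, xmm1, Assembler::eq, /*is_signed*/ true, Assembler::AVX_512bit);
  //
  // after which k2 can drive a masked move or be tested with kortest/ktest
  // (declared further below).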

  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); }

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
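
  // Illustrative note: the masked shift helpers below take an extra
  // 'is_varshift' flag.  When it is false, the count comes from the low bits
  // of 'src' and is applied to every lane (evpsll*/evpsrl*/evpsra*); when it
  // is true, the AVX-512 variable-shift forms are emitted instead
  // (evpsllv*/evpsrlv*/evpsrav*), shifting each lane by the count in the
  // corresponding lane of 'src'.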

  using Assembler::evpsllw;
  void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
    }
  }

  using Assembler::evpsrlq;
  void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsraw;
  void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsrad;
  void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsraq;
  void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
    }
  }

  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }
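
  // Illustrative usage only: the pshufd 'mode' immediate encodes, two bits per
  // destination dword, which source dword to copy.  For example 0x4E
  // (0b01001110) swaps the two 64-bit halves of an XMM register:
  //
  //   __ pshufd(xmm1, xmm1, 0x4E);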

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }

  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
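
  // Illustrative usage only: these wrappers pick an encoding that is also
  // legal on AVX-512 targets without the VL extension.  A typical horizontal
  // reduction step narrows a 256-bit accumulator to 128 bits like
  //
  //   __ vextracti128(xmm1, xmm0, 1);                      // xmm1 = upper half of ymm0
  //   __ vpaddd(xmm0, xmm0, xmm1, Assembler::AVX_128bit);  // add the two halves
  //
  // The register choices are assumptions made for this sketch.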

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }
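
  // Illustrative note: for pclmulqdq/vpclmulqdq the immediate's bit 0 selects
  // which 64-bit half of the first operand is used and bit 4 selects the half
  // of the second operand, so the helpers above cover the lower*lower (0x00),
  // upper*upper (0x11) and the two mixed (0x10, 0x01) products needed when
  // folding CRC polynomials, e.g.
  //
  //   __ vpclmulhdq(xmm7, xmm7, xmm1);   // xmm7 = clmul(xmm7[127:64], xmm1[127:64])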

  // AVX-512 mask operations.
  void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
  void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
  void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void kortest(uint masklen, KRegister src1, KRegister src2);
  void ktest(uint masklen, KRegister src1, KRegister src2);

  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);

  using Assembler::evpandq;
  void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::evpaddq;
  void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::evporq;
  void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpshufb;
  void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpor;
  void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpternlogq;
  void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); }
  void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj, Register rscratch);
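
  // Illustrative note: movoop above and mov_metadata below record a relocation
  // for the embedded constant so the runtime can later visit and update it; a
  // raw movptr of the same bits would be invisible to those passes.  A sketch,
  // where 'the_method' is a hypothetical Metadata pointer:
  //
  //   __ mov_metadata(rbx, (Metadata*)the_method);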

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj, Register rscratch);

  void movptr(Register dst, Register src);
  void movptr(Register dst, Address src);
  void movptr(Register dst, AddressLiteral src);
  void movptr(Register dst, ArrayAddress src);
  void movptr(Register dst, intptr_t src);
  void movptr(Address dst, Register src);
  void movptr(Address dst, int32_t imm);
  void movptr(Address dst, intptr_t src, Register rscratch);
  void movptr(ArrayAddress dst, Register src, Register rscratch);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

  // to avoid hiding movl
  void mov32(Register dst, AddressLiteral src);
  void mov32(AddressLiteral dst, Register src, Register rscratch = noreg);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::movq;
  void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Can push value or effective address
  void pushptr(AddressLiteral src, Register rscratch);

  void pushptr(Address src) { pushq(src); }
  void popptr(Address src) { popq(src); }

  void pushoop(jobject obj, Register rscratch);
  void pushklass(Metadata* obj, Register rscratch);

  // Sign-extend a 32-bit value (l) to pointer size
  void movl2ptr(Register dst, Address src) { movslq(dst, src); }
  void movl2ptr(Register dst, Register src) { movslq(dst, src); }

 public:
  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask = knoreg);

  // clear memory initialization sequence for constant size;
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);
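
  // Illustrative note: the add2_with_carry/multiply_* helpers below back the
  // BigInteger.multiplyToLen/squareToLen/mulAdd intrinsics.  Each step of the
  // schoolbook multiply forms the 128-bit product x[i] * y[j], adds the partial
  // word already in z and the incoming carry, stores the low 64 bits back into
  // z and feeds the high 64 bits into the next step as the new carry.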

  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
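
  // Illustrative note: update_byte_crc32 above performs the standard
  // table-driven step of the (reflected) CRC-32 used by java.util.zip.CRC32:
  //
  //   crc = table[(crc ^ byte) & 0xFF] ^ (crc >> 8);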

  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);

  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI

  void vallones(XMMRegister dst, int vector_len);

  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

  void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow);

  void save_legacy_gprs();
  void restore_legacy_gprs();
  void setcc(Assembler::Condition comparison, Register dst);
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP