/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int     number_of_arguments // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // call_VM_base returns the register which contains the thread upon return.
  // If no last_java_sp is specified (noreg), then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
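  // Illustrative only (not part of this interface): a stub generator typically
  // wraps a CodeBuffer and emits through a MacroAssembler; the buffer name and
  // sizes below are placeholders.
  //
  //   CodeBuffer buffer("example_stub", /* code_size */ 512, /* locs_size */ 64);
  //   MacroAssembler* masm = new MacroAssembler(&buffer);
  //   masm->enter();              // build a frame
  //   // ... emit the stub body ...
  //   masm->leave();
  //   masm->ret(0);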
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe();
  virtual void check_and_handle_earlyret();

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
           (op == 0x8D) /* lea */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { incrementq(reg, value); }
  void decrement(Register reg, int value = 1) { decrementq(reg, value); }
  void increment(Address dst, int value = 1)  { incrementq(dst, value); }
  void decrement(Address dst, int value = 1)  { decrementq(dst, value); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);
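  // Sketch of typical use (illustrative; "__" is the usual masm-> shorthand in
  // HotSpot code): callers state the intent and the macro picks a suitable
  // encoding for the value, e.g.
  //
  //   __ increment(rsp, 2 * wordSize);   // reclaim two stack slots
  //   __ decrement(rbx);                 // counter -= 1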
  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information).
  // The pointer will be loaded into the thread register. This is a slow version that does a native call.
  // Normally, the JavaThread pointer is available in r15_thread; use that where possible.
  void get_thread_slow(Register thread);

  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result_oop(Register oop_result);
  void get_vm_result_metadata(Register metadata_result);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label&   last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(bool clear_fp);
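  // Sketch of a typical upcall into the runtime (illustrative; the entry point
  // name is hypothetical). call_VM sets up last_Java_frame, passes the current
  // thread implicitly, and leaves an oop result (if any) in the first register:
  //
  //   __ call_VM(rax,
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::example_entry),  // hypothetical entry
  //              rbx /* arg_1 */,
  //              true /* check_exceptions */);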
  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register tmp);
  void resolve_global_jobject(Register value, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_narrow_klass_compact(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the Klass pointer of an object to a given Klass (which might be narrow,
  // depending on UseCompressedClassPointers).
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointers of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
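  // Sketch (illustrative; the field offset is hypothetical): heap oop accessors
  // route through the active GC barrier set, so generated code uses these
  // rather than raw quadword moves:
  //
  //   __ load_heap_oop(rax, Address(rbx, obj_field_offset));
  //   __ store_heap_oop(Address(rbx, obj_field_offset), rax, rcx, rdx, r8);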
  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // If the heap base register is used, reinitialize it with the correct value.
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)
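  // Sketch (illustrative): Java 'idiv'/'irem' with the dividend in rax and the
  // divisor in a general register; the min_int / -1 special case is handled as
  // the JVM spec requires, and the returned offset marks the instruction that
  // can take the implicit divide-by-zero trap:
  //
  //   int idivl_offset = __ corrected_idivl(rcx);   // quotient in rax, remainder in rdx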
  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU- or AVX opmask-related methods here because C1 and the interpreter
  // - always save/restore the FPU state as a whole, and
  // - do not care about the AVX-512 opmask registers.
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);
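  // Sketch (illustrative): preserving a few general-purpose registers around a
  // clobbering sequence; with the default offset the macros manage the rsp
  // adjustment themselves:
  //
  //   RegSet live = RegSet::of(rax, rcx, rdx);
  //   __ push_set(live);
  //   // ... code that clobbers rax/rcx/rdx ...
  //   __ pop_set(live);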
  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation
  void tlab_allocate(
    Register obj,                // result: pointer to object after successful allocation
    Register var_size_in_bytes,  // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,  // object size in bytes if known at compile time
    Register t1,                 // temp register
    Register t2,                 // temp register
    Label&   slow_case           // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);
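  // Sketch of how the fast and slow paths are usually wired together
  // (illustrative register choices; roughly what the combined
  // check_klass_subtype() below does for the simple case):
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure, nullptr);
  //   // falling through here means "maybe": finish with the slow path
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, noreg, &L_success, nullptr);
  //   __ bind(L_failure);   // slow path falls through on failure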
  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);

  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register>& available_regs,
                             RegSet& regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }
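  // Sketch (illustrative): guarding a static call from compiled code on the
  // holder class being initialized; the klass is already loaded in a register
  // and the slow path re-enters the runtime:
  //
  //   Label L_ok, L_slow;
  //   __ clinit_barrier(rbx /* klass */, &L_ok, &L_slow);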
  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to successive stack pages until the given offset is reached, to check for
  // stack overflow + shadow pages. Also clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);
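  // Sketch (illustrative): method entry code typically bangs a page below the
  // current stack pointer so a stack overflow is detected eagerly:
  //
  //   __ bang_stack_with_offset((int)os::vm_page_size());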
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics

  void addptr(Address dst, int32_t src) { addq(dst, src); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { addq(dst, src); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { andq(src1, src2); }

  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE: src2 must be the lval. This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { cmpq(src1, src2); }
  void cmpptr(Register src1, Address src2)  { cmpq(src1, src2); }

  void cmpptr(Register src1, int32_t src2) { cmpq(src1, src2); }
  void cmpptr(Address src1, int32_t src2)  { cmpq(src1, src2); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { imulq(dst, src); }
  void imulptr(Register dst, Register src, int imm32) { imulq(dst, src, imm32); }


  void negptr(Register dst) { negq(dst); }

  void notptr(Register dst) { notq(dst); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { shlq(dst); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { shrq(dst); }

  void sarptr(Register dst) { sarq(dst); }
  void sarptr(Register dst, int32_t src) { sarq(dst, src); }

  void subptr(Address dst, int32_t src) { subq(dst, src); }

  void subptr(Register dst, Address src) { subq(dst, src); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { sbbq(dst, src); }
  void sbbptr(Register dst, int32_t src) { sbbq(dst, src); }

  void xchgptr(Register src1, Register src2) { xchgq(src1, src2); }
  void xchgptr(Register src1, Address src2) { xchgq(src1, src2); }

  void xaddptr(Address src1, Register src2) { xaddq(src1, src2); }
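  // Sketch (illustrative): the ptr-sized wrappers keep shared code independent
  // of operand width; on x86_64 they all expand to the 64-bit forms, e.g.
  //
  //   __ addptr(rsp, 2 * wordSize);
  //   __ testptr(rax, rax);
  //   __ jcc(Assembler::zero, L_was_null);   // L_was_null is a caller-defined label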
  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments the passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { atomic_incq(counter_addr, rscratch); }
  void atomic_incptr(Address counter_addr) { atomic_incq(counter_addr); }

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address  dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { orq(dst, src); }
  void orptr(Register dst, Register src) { orq(dst, src); }
  void orptr(Register dst, int32_t src) { orq(dst, src); }
  void orptr(Address dst, int32_t imm32) { orq(dst, imm32); }

  void testptr(Register src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Address src2) { testq(src1, src2); }
  void testptr(Address src, int32_t imm32) { testq(src, imm32); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { xorq(dst, src); }
  void xorptr(Register dst, Address src) { xorq(dst, src); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because it is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because it is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
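  // Sketch (illustrative): tail-jumping to a shared runtime stub whose address
  // is fixed at code-emission time; RuntimeAddress is the usual AddressLiteral
  // wrapper for such targets:
  //
  //   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));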
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
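  // Sketch (illustrative): the aliases read like the Intel mnemonics, and
  // maybe_short = true lets the assembler pick the short rel8 encoding when
  // the label turns out to be close:
  //
  //   __ cmpl(rcx, 0);
  //   __ jle(L_done);   // equivalent to jcc(Assembler::lessEqual, L_done)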

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void push_ppx(Register src);
  void pop_ppx(Register dst);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andnpd(XMMRegister dst, XMMRegister src) { Assembler::andnpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void orpd(XMMRegister dst, XMMRegister src) { Assembler::orpd(dst, src); }

  void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,  /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,  /* ymm5 */   /* full cycle is 16 iterations */
    XMMRegister xmm_2,  /* ymm6 */
    XMMRegister xmm_3,  /* ymm7 */
    Register    reg_a,  /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
    Register    reg_b,  /* ebx */    /* full cycle is 8 iterations */
    Register    reg_c,  /* edi */
    Register    reg_d,  /* esi */
    Register    reg_e,  /* r8d */
    Register    reg_f,  /* r9d */
    Register    reg_g,  /* r10d */
    Register    reg_h,  /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
  void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcasti128;
  void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address     dst, XMMRegister    src);
  void movdqu(XMMRegister dst, XMMRegister    src);
  void movdqu(XMMRegister dst, Address        src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register  dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address   dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register  src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address   src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register  src) { Assembler::kmovql(dst, src); }
  void kmovql(Register  dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address   src) { Assembler::kmovql(dst, src); }
  void kmovql(Address   dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
  void kmov(Address   dst, KRegister src);
  void kmov(KRegister dst, Address   src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register  dst, KRegister src);
  void kmov(KRegister dst, Register  src);

  using Assembler::movddup;
  void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX Unaligned forms
  void vmovdqu(Address     dst, XMMRegister    src);
  void vmovdqu(XMMRegister dst, Address        src);
  void vmovdqu(XMMRegister dst, XMMRegister    src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, XMMRegister    src, int vector_len);
  void vmovdqu(XMMRegister dst, Address        src, int vector_len);
  void vmovdqu(Address     dst, XMMRegister    src, int vector_len);

  // AVX Aligned forms
  using Assembler::vmovdqa;
  void vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len);

  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
  void evmovdqub(XMMRegister dst, Address     src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }

  void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqub(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(Address dst, XMMRegister src, int vector_len)     { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, int vector_len)     { Assembler::evmovdquw(dst, src, vector_len); }

  void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquw(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdqul(dst, src, vector_len);
    }
  }
  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }

  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);
  void evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::movapd;
  void movapd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1254 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1255 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1256 1257 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1258 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1259 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1260 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1261 1262 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } 1263 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1264 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1265 1266 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1267 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1268 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1269 1270 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 1271 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1272 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1273 1274 // Carry-Less Multiplication Quadword 1275 void pclmulldq(XMMRegister dst, XMMRegister src) { 1276 // 0x00 - multiply lower 64 bits [0:63] 1277 Assembler::pclmulqdq(dst, src, 0x00); 1278 } 1279 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1280 // 0x11 - multiply upper 64 bits [64:127] 1281 Assembler::pclmulqdq(dst, src, 0x11); 1282 } 1283 1284 void pcmpeqb(XMMRegister dst, XMMRegister src); 1285 void pcmpeqw(XMMRegister dst, XMMRegister src); 1286 1287 void pcmpestri(XMMRegister dst, Address src, int imm8); 1288 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1289 1290 void pmovzxbw(XMMRegister dst, XMMRegister src); 1291 void pmovzxbw(XMMRegister dst, Address src); 1292 1293 void pmovmskb(Register dst, XMMRegister src); 1294 1295 void ptest(XMMRegister dst, XMMRegister src); 1296 1297 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1298 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1299 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1300 1301 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1302 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1303 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1304 1305 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1306 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1307 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1308 1309 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1310 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1311 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1312 1313 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1314 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1315 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1316 1317 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1318 void ucomisd(XMMRegister dst, Address src) { 
Assembler::ucomisd(dst, src); } 1319 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1320 1321 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1322 void xorpd(XMMRegister dst, XMMRegister src); 1323 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1324 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1325 1326 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1327 void xorps(XMMRegister dst, XMMRegister src); 1328 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1329 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1330 1331 // Shuffle Bytes 1332 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1333 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1334 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1335 // AVX 3-operands instructions 1336 1337 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1338 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1339 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1340 1341 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1342 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1343 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1344 1345 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1346 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1347 1348 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1349 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1350 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1351 1352 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1353 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1354 1355 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1356 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1357 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1358 1359 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1360 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1361 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1362 1363 using Assembler::vpbroadcastd; 1364 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1365 1366 using Assembler::vpbroadcastq; 1367 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1368 1369 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1370 void vpcmpeqb(XMMRegister 
dst, XMMRegister src1, Address src2, int vector_len); 1371 1372 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1373 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1374 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1375 1376 // Vector compares 1377 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1378 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1379 } 1380 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1381 1382 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1383 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1384 } 1385 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1386 1387 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1388 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1389 } 1390 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1391 1392 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1393 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); 1394 } 1395 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1396 1397 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1398 1399 // Emit comparison instruction for the specified comparison predicate. 
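// vpcmpCCW takes a symbolic ComparisonPredicate plus an xtmp scratch register (used, where
// needed, to synthesize predicates that lack a single direct encoding), while vpcmpCC is
// handed the raw immediate condition encoding of the underlying compare instruction.
// Illustrative call-site sketch (the registers, cond_encoding and width values below are
// hypothetical placeholders, not part of this interface):
//   __ vpcmpCC(xmm0, xmm1, xmm2, cond_encoding, width, Assembler::AVX_256bit);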
1400 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1401 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1402 1403 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1404 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1405 1406 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1407 1408 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1409 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1410 1411 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1412 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1413 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1414 1415 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); } 1416 1417 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1418 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1419 1420 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1421 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1422 1423 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1424 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1425 1426 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1427 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1428 1429 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1430 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1431 1432 using Assembler::evpsllw; 1433 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1434 if (!is_varshift) { 1435 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1436 } else { 1437 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1438 } 1439 } 1440 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1441 if (!is_varshift) { 1442 Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1443 } else { 1444 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1445 } 1446 } 1447 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1448 if (!is_varshift) { 1449 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1450 } else { 1451 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1452 } 1453 } 1454 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1455 if (!is_varshift) { 1456 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1457 } else { 1458 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1459 } 1460 } 1461 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1462 
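// As with the other masked-shift wrappers here, 'is_varshift' selects the per-element
// variable-count form (evpsrlvd) instead of the uniform-count form (evpsrld), so callers
// choose the encoding with a single flag.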
if (!is_varshift) { 1463 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1464 } else { 1465 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1466 } 1467 } 1468 1469 using Assembler::evpsrlq; 1470 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1471 if (!is_varshift) { 1472 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1473 } else { 1474 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1475 } 1476 } 1477 using Assembler::evpsraw; 1478 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1479 if (!is_varshift) { 1480 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1481 } else { 1482 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1483 } 1484 } 1485 using Assembler::evpsrad; 1486 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1487 if (!is_varshift) { 1488 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1489 } else { 1490 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1491 } 1492 } 1493 using Assembler::evpsraq; 1494 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1495 if (!is_varshift) { 1496 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1497 } else { 1498 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1499 } 1500 } 1501 1502 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1503 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1504 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1505 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1506 1507 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1508 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1509 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1510 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1511 1512 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1513 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1514 1515 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1516 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1517 1518 void vptest(XMMRegister dst, XMMRegister src); 1519 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1520 1521 void punpcklbw(XMMRegister dst, XMMRegister src); 1522 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1523 1524 void pshufd(XMMRegister dst, Address src, int mode); 1525 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1526 1527 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1528 void pshuflw(XMMRegister dst, Address src, int 
mode) { Assembler::pshuflw(dst, src, mode); } 1529 1530 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1531 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1532 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1533 1534 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1535 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1536 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1537 1538 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1539 1540 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1541 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1542 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1543 1544 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1545 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1546 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1547 1548 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1549 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1550 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1551 1552 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1553 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1554 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1555 1556 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1557 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1558 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1559 1560 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1561 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1562 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1563 1564 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1565 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1566 1567 // AVX Vector instructions 1568 1569 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1570 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1571 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1572 1573 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 
Assembler::vxorps(dst, nds, src, vector_len); } 1574 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1575 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1576 1577 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1578 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1579 Assembler::vpxor(dst, nds, src, vector_len); 1580 else 1581 Assembler::vxorpd(dst, nds, src, vector_len); 1582 } 1583 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1584 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1585 Assembler::vpxor(dst, nds, src, vector_len); 1586 else 1587 Assembler::vxorpd(dst, nds, src, vector_len); 1588 } 1589 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1590 1591 // Simple version for AVX2 256bit vectors 1592 void vpxor(XMMRegister dst, XMMRegister src) { 1593 assert(UseAVX >= 2, "Should be at least AVX2"); 1594 Assembler::vpxor(dst, dst, src, AVX_256bit); 1595 } 1596 void vpxor(XMMRegister dst, Address src) { 1597 assert(UseAVX >= 2, "Should be at least AVX2"); 1598 Assembler::vpxor(dst, dst, src, AVX_256bit); 1599 } 1600 1601 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1602 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1603 1604 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1605 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1606 Assembler::vinserti32x4(dst, nds, src, imm8); 1607 } else if (UseAVX > 1) { 1608 // vinserti128 is available only in AVX2 1609 Assembler::vinserti128(dst, nds, src, imm8); 1610 } else { 1611 Assembler::vinsertf128(dst, nds, src, imm8); 1612 } 1613 } 1614 1615 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1616 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1617 Assembler::vinserti32x4(dst, nds, src, imm8); 1618 } else if (UseAVX > 1) { 1619 // vinserti128 is available only in AVX2 1620 Assembler::vinserti128(dst, nds, src, imm8); 1621 } else { 1622 Assembler::vinsertf128(dst, nds, src, imm8); 1623 } 1624 } 1625 1626 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1627 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1628 Assembler::vextracti32x4(dst, src, imm8); 1629 } else if (UseAVX > 1) { 1630 // vextracti128 is available only in AVX2 1631 Assembler::vextracti128(dst, src, imm8); 1632 } else { 1633 Assembler::vextractf128(dst, src, imm8); 1634 } 1635 } 1636 1637 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1638 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1639 Assembler::vextracti32x4(dst, src, imm8); 1640 } else if (UseAVX > 1) { 1641 // vextracti128 is available only in AVX2 1642 Assembler::vextracti128(dst, src, imm8); 1643 } else { 1644 Assembler::vextractf128(dst, src, imm8); 1645 } 1646 } 1647 1648 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1649 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1650 vinserti128(dst, dst, src, 1); 1651 } 1652 void vinserti128_high(XMMRegister dst, Address src) { 1653 vinserti128(dst, dst, src, 1); 1654 } 1655 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1656 
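// imm8 == 1 selects the upper 128-bit lane of the 256-bit (YMM) source.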
vextracti128(dst, src, 1); 1657 } 1658 void vextracti128_high(Address dst, XMMRegister src) { 1659 vextracti128(dst, src, 1); 1660 } 1661 1662 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1663 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1664 Assembler::vinsertf32x4(dst, dst, src, 1); 1665 } else { 1666 Assembler::vinsertf128(dst, dst, src, 1); 1667 } 1668 } 1669 1670 void vinsertf128_high(XMMRegister dst, Address src) { 1671 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1672 Assembler::vinsertf32x4(dst, dst, src, 1); 1673 } else { 1674 Assembler::vinsertf128(dst, dst, src, 1); 1675 } 1676 } 1677 1678 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1679 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1680 Assembler::vextractf32x4(dst, src, 1); 1681 } else { 1682 Assembler::vextractf128(dst, src, 1); 1683 } 1684 } 1685 1686 void vextractf128_high(Address dst, XMMRegister src) { 1687 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1688 Assembler::vextractf32x4(dst, src, 1); 1689 } else { 1690 Assembler::vextractf128(dst, src, 1); 1691 } 1692 } 1693 1694 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1695 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1696 Assembler::vinserti64x4(dst, dst, src, 1); 1697 } 1698 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1699 Assembler::vinsertf64x4(dst, dst, src, 1); 1700 } 1701 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1702 Assembler::vextracti64x4(dst, src, 1); 1703 } 1704 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1705 Assembler::vextractf64x4(dst, src, 1); 1706 } 1707 void vextractf64x4_high(Address dst, XMMRegister src) { 1708 Assembler::vextractf64x4(dst, src, 1); 1709 } 1710 void vinsertf64x4_high(XMMRegister dst, Address src) { 1711 Assembler::vinsertf64x4(dst, dst, src, 1); 1712 } 1713 1714 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1715 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1716 vinserti128(dst, dst, src, 0); 1717 } 1718 void vinserti128_low(XMMRegister dst, Address src) { 1719 vinserti128(dst, dst, src, 0); 1720 } 1721 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1722 vextracti128(dst, src, 0); 1723 } 1724 void vextracti128_low(Address dst, XMMRegister src) { 1725 vextracti128(dst, src, 0); 1726 } 1727 1728 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1729 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1730 Assembler::vinsertf32x4(dst, dst, src, 0); 1731 } else { 1732 Assembler::vinsertf128(dst, dst, src, 0); 1733 } 1734 } 1735 1736 void vinsertf128_low(XMMRegister dst, Address src) { 1737 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1738 Assembler::vinsertf32x4(dst, dst, src, 0); 1739 } else { 1740 Assembler::vinsertf128(dst, dst, src, 0); 1741 } 1742 } 1743 1744 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1745 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1746 Assembler::vextractf32x4(dst, src, 0); 1747 } else { 1748 Assembler::vextractf128(dst, src, 0); 1749 } 1750 } 1751 1752 void vextractf128_low(Address dst, XMMRegister src) { 1753 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1754 Assembler::vextractf32x4(dst, src, 0); 1755 } else { 1756 Assembler::vextractf128(dst, src, 0); 1757 } 1758 } 1759 1760 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1761 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1762 Assembler::vinserti64x4(dst, dst, src, 0); 1763 } 
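// vinsertf64x4/vextractf64x4 below are the floating-point-domain counterparts of the
// integer-domain vinserti64x4/vextracti64x4 forms above; the data movement is identical.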
1764 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1765 Assembler::vinsertf64x4(dst, dst, src, 0); 1766 } 1767 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1768 Assembler::vextracti64x4(dst, src, 0); 1769 } 1770 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1771 Assembler::vextractf64x4(dst, src, 0); 1772 } 1773 void vextractf64x4_low(Address dst, XMMRegister src) { 1774 Assembler::vextractf64x4(dst, src, 0); 1775 } 1776 void vinsertf64x4_low(XMMRegister dst, Address src) { 1777 Assembler::vinsertf64x4(dst, dst, src, 0); 1778 } 1779 1780 // Carry-Less Multiplication Quadword 1781 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1782 // 0x00 - multiply lower 64 bits [0:63] 1783 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1784 } 1785 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1786 // 0x11 - multiply upper 64 bits [64:127] 1787 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1788 } 1789 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1790 // 0x10 - multiply nds[0:63] and src[64:127] 1791 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1792 } 1793 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1794 //0x01 - multiply nds[64:127] and src[0:63] 1795 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1796 } 1797 1798 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1799 // 0x00 - multiply lower 64 bits [0:63] 1800 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1801 } 1802 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1803 // 0x11 - multiply upper 64 bits [64:127] 1804 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1805 } 1806 1807 // AVX-512 mask operations. 
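// These wrappers are meant to pick an appropriately sized k-register instruction from the
// element type (kand/kor/kxor) or from the mask length in bits (knot/kortest/ktest), so call
// sites do not spell out the b/w/d/q variant. Illustrative, hypothetical usage sketch:
//   __ kxor(T_INT, k1, k1, k2);   // combine two masks produced by int-element compares
//   __ kortest(16, k1, k1);       // ZF is set iff all 16 mask bits of k1 are zero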
1808 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 1809 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1810 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 1811 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1812 void kortest(uint masklen, KRegister src1, KRegister src2); 1813 void ktest(uint masklen, KRegister src1, KRegister src2); 1814 1815 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1816 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1817 1818 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1819 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1820 1821 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1822 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1823 1824 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1825 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1826 1827 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1828 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1829 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1830 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1831 1832 using Assembler::evpandq; 1833 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1834 1835 using Assembler::evpaddq; 1836 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1837 1838 using Assembler::evporq; 1839 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1840 1841 using Assembler::vpshufb; 1842 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1843 1844 using Assembler::vpor; 1845 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1846 1847 using Assembler::vpternlogq; 1848 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 1849 1850 void cmov32( Condition cc, Register dst, Address src); 1851 void cmov32( Condition cc, Register dst, Register src); 1852 1853 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1854 1855 void cmovptr(Condition cc, Register dst, Address src) { cmovq(cc, dst, src); } 1856 void cmovptr(Condition cc, Register dst, Register src) { cmovq(cc, dst, src); } 1857 1858 void movoop(Register dst, jobject obj); 1859 void movoop(Address dst, jobject obj, Register rscratch); 1860 1861 void mov_metadata(Register dst, Metadata* obj); 1862 void mov_metadata(Address dst, 
Metadata* obj, Register rscratch); 1863 1864 void movptr(Register dst, Register src); 1865 void movptr(Register dst, Address src); 1866 void movptr(Register dst, AddressLiteral src); 1867 void movptr(Register dst, ArrayAddress src); 1868 void movptr(Register dst, intptr_t src); 1869 void movptr(Address dst, Register src); 1870 void movptr(Address dst, int32_t imm); 1871 void movptr(Address dst, intptr_t src, Register rscratch); 1872 void movptr(ArrayAddress dst, Register src, Register rscratch); 1873 1874 void movptr(Register dst, RegisterOrConstant src) { 1875 if (src.is_constant()) movptr(dst, src.as_constant()); 1876 else movptr(dst, src.as_register()); 1877 } 1878 1879 1880 // to avoid hiding movl 1881 void mov32(Register dst, AddressLiteral src); 1882 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 1883 1884 // Import other mov() methods from the parent class or else 1885 // they will be hidden by the following overriding declaration. 1886 using Assembler::movdl; 1887 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1888 1889 using Assembler::movq; 1890 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1891 1892 // Can push value or effective address 1893 void pushptr(AddressLiteral src, Register rscratch); 1894 1895 void pushptr(Address src) { pushq(src); } 1896 void popptr(Address src) { popq(src); } 1897 1898 void pushoop(jobject obj, Register rscratch); 1899 void pushklass(Metadata* obj, Register rscratch); 1900 1901 // sign-extend a 32-bit (l) value to a pointer-sized element 1902 void movl2ptr(Register dst, Address src) { movslq(dst, src); } 1903 void movl2ptr(Register dst, Register src) { movslq(dst, src); } 1904 1905 1906 public: 1907 // clear memory of size 'cnt' qwords, starting at 'base'; 1908 // if 'is_large' is set, do not try to produce a short loop 1909 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg); 1910 1911 // clear memory initialization sequence for constant size; 1912 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1913 1914 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 1915 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1916 1917 // Fill primitive arrays 1918 void generate_fill(BasicType t, bool aligned, 1919 Register to, Register value, Register count, 1920 Register rtmp, XMMRegister xtmp); 1921 1922 void encode_iso_array(Register src, Register dst, Register len, 1923 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1924 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 1925 1926 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 1927 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1928 Register y, Register y_idx, Register z, 1929 Register carry, Register product, 1930 Register idx, Register kdx); 1931 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 1932 Register yz_idx, Register idx, 1933 Register carry, Register product, int offset); 1934 void multiply_128_x_128_bmi2_loop(Register y, Register z, 1935 Register carry, Register carry2, 1936 Register idx, Register jdx, 1937 Register yz_idx1, Register yz_idx2, 1938 Register tmp, Register tmp3, Register tmp4); 1939 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 1940 Register yz_idx, Register idx, Register jdx, 1941 Register
carry, Register product, 1942 Register carry2); 1943 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 1944 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 1945 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 1946 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1947 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 1948 Register tmp2); 1949 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 1950 Register rdxReg, Register raxReg); 1951 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 1952 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1953 Register tmp3, Register tmp4); 1954 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1955 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1956 1957 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 1958 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1959 Register raxReg); 1960 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 1961 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1962 Register raxReg); 1963 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 1964 Register result, Register tmp1, Register tmp2, 1965 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 1966 1967 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 1968 void update_byte_crc32(Register crc, Register val, Register table); 1969 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 1970 1971 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 1972 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 1973 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 1974 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 1975 1976 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 1977 // Note on a naming convention: 1978 // Prefix w = register only used on a Westmere+ architecture 1979 // Prefix n = register only used on a Nehalem architecture 1980 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1981 Register tmp1, Register tmp2, Register tmp3); 1982 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 1983 Register in_out, 1984 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 1985 XMMRegister w_xtmp2, 1986 Register tmp1, 1987 Register n_tmp2, Register n_tmp3); 1988 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 1989 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1990 Register tmp1, Register tmp2, 1991 Register n_tmp3); 1992 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 1993 Register in_out1, Register in_out2, Register in_out3, 1994 Register tmp1, Register tmp2, Register tmp3, 1995 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1996 Register 
tmp4, Register tmp5, 1997 Register n_tmp6); 1998 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 1999 Register tmp1, Register tmp2, Register tmp3, 2000 Register tmp4, Register tmp5, Register tmp6, 2001 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2002 bool is_pclmulqdq_supported); 2003 // Fold 128-bit data chunk 2004 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2005 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 2006 // Fold 512-bit data chunk 2007 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2008 // Fold 8-bit data 2009 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2010 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2011 2012 // Compress char[] array to byte[]. 2013 void char_array_compress(Register src, Register dst, Register len, 2014 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2015 XMMRegister tmp4, Register tmp5, Register result, 2016 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2017 2018 // Inflate byte[] array to char[]. 2019 void byte_array_inflate(Register src, Register dst, Register len, 2020 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 2021 2022 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 2023 Register length, Register temp, int vec_enc); 2024 2025 void fill64_masked(uint shift, Register dst, int disp, 2026 XMMRegister xmm, KRegister mask, Register length, 2027 Register temp, bool use64byteVector = false); 2028 2029 void fill32_masked(uint shift, Register dst, int disp, 2030 XMMRegister xmm, KRegister mask, Register length, 2031 Register temp); 2032 2033 void fill32(Address dst, XMMRegister xmm); 2034 2035 void fill32(Register dst, int disp, XMMRegister xmm); 2036 2037 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 2038 2039 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false); 2040 2041 void convert_f2i(Register dst, XMMRegister src); 2042 void convert_d2i(Register dst, XMMRegister src); 2043 void convert_f2l(Register dst, XMMRegister src); 2044 void convert_d2l(Register dst, XMMRegister src); 2045 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx); 2046 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx); 2047 2048 void cache_wb(Address line); 2049 void cache_wbsync(bool is_pre); 2050 2051 #ifdef COMPILER2_OR_JVMCI 2052 void generate_fill_avx3(BasicType type, Register to, Register value, 2053 Register count, Register rtmp, XMMRegister xtmp); 2054 #endif // COMPILER2_OR_JVMCI 2055 2056 void vallones(XMMRegister dst, int vector_len); 2057 2058 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); 2059 2060 void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow); 2061 void lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow); 2062 2063 void save_legacy_gprs(); 2064 void restore_legacy_gprs(); 2065 void setcc(Assembler::Condition comparison, Register dst); 2066 }; 2067 2068 #endif // CPU_X86_MACROASSEMBLER_X86_HPP