/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

class ciInlineKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int     number_of_arguments // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg), then rsp will be used instead.
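  // Illustrative sketch (not part of the original header): the public call_VM
  // overloads declared further below funnel into call_VM_base. A use such as
  //
  //   __ call_VM(rax /* oop_result */, entry_point, rbx /* arg_1 */);
  //
  // roughly expands to argument set-up followed by
  // call_VM_base(rax, noreg, noreg, entry_point, 1, true); the exact expansion
  // depends on the overload chosen (see call_VM_helper below).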
69 virtual void call_VM_base( // returns the register containing the thread upon return 70 Register oop_result, // where an oop-result ends up if any; use noreg otherwise 71 Register java_thread, // the thread if computed before ; use noreg otherwise 72 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise 73 address entry_point, // the entry point 74 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call 75 bool check_exceptions // whether to check for pending exceptions after return 76 ); 77 78 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); 79 80 // helpers for FPU flag access 81 // tmp is a temporary register, if none is available use noreg 82 void save_rax (Register tmp); 83 void restore_rax(Register tmp); 84 85 public: 86 MacroAssembler(CodeBuffer* code) : Assembler(code) {} 87 88 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. 89 // The implementation is only non-empty for the InterpreterMacroAssembler, 90 // as only the interpreter handles PopFrame and ForceEarlyReturn requests. 91 virtual void check_and_handle_popframe(Register java_thread); 92 virtual void check_and_handle_earlyret(Register java_thread); 93 94 Address as_Address(AddressLiteral adr); 95 Address as_Address(ArrayAddress adr, Register rscratch); 96 97 // Support for null-checks 98 // 99 // Generates code that causes a null OS exception if the content of reg is null. 100 // If the accessed location is M[reg + offset] and the offset is known, provide the 101 // offset. No explicit code generation is needed if the offset is within a certain 102 // range (0 <= offset <= page_size). 103 104 void null_check(Register reg, int offset = -1); 105 static bool needs_explicit_null_check(intptr_t offset); 106 static bool uses_implicit_null_check(void* address); 107 108 // markWord tests, kills markWord reg 109 void test_markword_is_inline_type(Register markword, Label& is_inline_type); 110 111 // inlineKlass queries, kills temp_reg 112 void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type); 113 void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type); 114 void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type); 115 116 // Get the default value oop for the given InlineKlass 117 void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj); 118 // The empty value oop, for the given InlineKlass ("empty" as in no instance fields) 119 // get_default_value_oop with extra assertion for empty inline klass 120 void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj); 121 122 void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free); 123 void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free); 124 void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat); 125 void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker); 126 127 // Check oops for special arrays, i.e. 
flat arrays and/or null-free arrays 128 void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label); 129 void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array); 130 void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array); 131 void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array); 132 void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array); 133 134 // Check array klass layout helper for flat or null-free arrays... 135 void test_flat_array_layout(Register lh, Label& is_flat_array); 136 void test_non_flat_array_layout(Register lh, Label& is_non_flat_array); 137 138 // Required platform-specific helpers for Label::patch_instructions. 139 // They _shadow_ the declarations in AbstractAssembler, which are undefined. 140 void pd_patch_instruction(address branch, address target, const char* file, int line) { 141 unsigned char op = branch[0]; 142 assert(op == 0xE8 /* call */ || 143 op == 0xE9 /* jmp */ || 144 op == 0xEB /* short jmp */ || 145 (op & 0xF0) == 0x70 /* short jcc */ || 146 (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ || 147 (op == 0xC7 && branch[1] == 0xF8) /* xbegin */, 148 "Invalid opcode at patch point"); 149 150 if (op == 0xEB || (op & 0xF0) == 0x70) { 151 // short offset operators (jmp and jcc) 152 char* disp = (char*) &branch[1]; 153 int imm8 = checked_cast<int>(target - (address) &disp[1]); 154 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", 155 file == nullptr ? "<null>" : file, line); 156 *disp = (char)imm8; 157 } else { 158 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1]; 159 int imm32 = checked_cast<int>(target - (address) &disp[1]); 160 *disp = imm32; 161 } 162 } 163 164 // The following 4 methods return the offset of the appropriate move instruction 165 166 // Support for fast byte/short loading with zero extension (depending on particular CPU) 167 int load_unsigned_byte(Register dst, Address src); 168 int load_unsigned_short(Register dst, Address src); 169 170 // Support for fast byte/short loading with sign extension (depending on particular CPU) 171 int load_signed_byte(Register dst, Address src); 172 int load_signed_short(Register dst, Address src); 173 174 // Support for sign-extension (hi:lo = extend_sign(lo)) 175 void extend_sign(Register hi, Register lo); 176 177 // Load and store values by size and signed-ness 178 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); 179 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); 180 181 // Support for inc/dec with optimal instruction selection depending on value 182 183 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } 184 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } 185 void increment(Address dst, int value = 1) { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; } 186 void decrement(Address dst, int value = 1) { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; } 187 188 void decrementl(Address dst, int value = 1); 189 void decrementl(Register reg, int value = 1); 190 191 void decrementq(Register reg, int value = 1); 192 void decrementq(Address dst, int value = 1); 193 194 void incrementl(Address dst, int value 
= 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
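  // Illustrative sketch (assumption, not from the original header): leaf calls
  // do not produce an oop result and do not check for pending exceptions, e.g.
  //
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
  //                   r15_thread, rbx);
  //
  // whereas the call_VM overloads below are used for runtime entries that may
  // throw or that return an oop/metadata result.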
286 287 288 void call_VM(Register oop_result, 289 address entry_point, 290 bool check_exceptions = true); 291 void call_VM(Register oop_result, 292 address entry_point, 293 Register arg_1, 294 bool check_exceptions = true); 295 void call_VM(Register oop_result, 296 address entry_point, 297 Register arg_1, Register arg_2, 298 bool check_exceptions = true); 299 void call_VM(Register oop_result, 300 address entry_point, 301 Register arg_1, Register arg_2, Register arg_3, 302 bool check_exceptions = true); 303 304 // Overloadings with last_Java_sp 305 void call_VM(Register oop_result, 306 Register last_java_sp, 307 address entry_point, 308 int number_of_arguments = 0, 309 bool check_exceptions = true); 310 void call_VM(Register oop_result, 311 Register last_java_sp, 312 address entry_point, 313 Register arg_1, bool 314 check_exceptions = true); 315 void call_VM(Register oop_result, 316 Register last_java_sp, 317 address entry_point, 318 Register arg_1, Register arg_2, 319 bool check_exceptions = true); 320 void call_VM(Register oop_result, 321 Register last_java_sp, 322 address entry_point, 323 Register arg_1, Register arg_2, Register arg_3, 324 bool check_exceptions = true); 325 326 void get_vm_result (Register oop_result, Register thread); 327 void get_vm_result_2(Register metadata_result, Register thread); 328 329 // These always tightly bind to MacroAssembler::call_VM_base 330 // bypassing the virtual implementation 331 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); 332 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); 333 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); 334 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); 335 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); 336 337 void call_VM_leaf0(address entry_point); 338 void call_VM_leaf(address entry_point, 339 int number_of_arguments = 0); 340 void call_VM_leaf(address entry_point, 341 Register arg_1); 342 void call_VM_leaf(address entry_point, 343 Register arg_1, Register arg_2); 344 void call_VM_leaf(address entry_point, 345 Register arg_1, Register arg_2, Register arg_3); 346 347 void call_VM_leaf(address entry_point, 348 Register arg_1, Register arg_2, Register arg_3, Register arg_4); 349 350 // These always tightly bind to MacroAssembler::call_VM_leaf_base 351 // bypassing the virtual implementation 352 void super_call_VM_leaf(address entry_point); 353 void super_call_VM_leaf(address entry_point, Register arg_1); 354 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); 355 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); 356 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); 357 358 // last Java Frame (fills frame anchor) 359 void set_last_Java_frame(Register thread, 360 Register last_java_sp, 361 Register last_java_fp, 362 address last_java_pc, 363 Register rscratch); 364 365 // thread in the default location (r15_thread on 64bit) 366 void set_last_Java_frame(Register last_java_sp, 367 
Register last_java_fp, 368 address last_java_pc, 369 Register rscratch); 370 371 void reset_last_Java_frame(Register thread, bool clear_fp); 372 373 // thread in the default location (r15_thread on 64bit) 374 void reset_last_Java_frame(bool clear_fp); 375 376 // jobjects 377 void clear_jobject_tag(Register possibly_non_local); 378 void resolve_jobject(Register value, Register thread, Register tmp); 379 void resolve_global_jobject(Register value, Register thread, Register tmp); 380 381 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 382 void c2bool(Register x); 383 384 // C++ bool manipulation 385 386 void movbool(Register dst, Address src); 387 void movbool(Address dst, bool boolconst); 388 void movbool(Address dst, Register src); 389 void testbool(Register dst); 390 391 void resolve_oop_handle(Register result, Register tmp); 392 void resolve_weak_handle(Register result, Register tmp); 393 void load_mirror(Register mirror, Register method, Register tmp); 394 void load_method_holder_cld(Register rresult, Register rmethod); 395 396 void load_method_holder(Register holder, Register method); 397 398 // oop manipulations 399 void load_metadata(Register dst, Register src); 400 void load_klass(Register dst, Register src, Register tmp); 401 void store_klass(Register dst, Register src, Register tmp); 402 403 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 404 Register tmp1, Register thread_tmp); 405 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 406 Register tmp1, Register tmp2, Register tmp3); 407 408 void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info); 409 410 // inline type data payload offsets... 411 void first_field_offset(Register inline_klass, Register offset); 412 void data_for_oop(Register oop, Register data, Register inline_klass); 413 // get data payload ptr a flat value array at index, kills rcx and index 414 void data_for_value_array_index(Register array, Register array_klass, 415 Register index, Register data); 416 417 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, 418 Register thread_tmp = noreg, DecoratorSet decorators = 0); 419 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, 420 Register thread_tmp = noreg, DecoratorSet decorators = 0); 421 void store_heap_oop(Address dst, Register val, Register tmp1 = noreg, 422 Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0); 423 424 // Used for storing null. All other oop constants should be 425 // stored using routines that take a jobject. 
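  // Illustrative sketch (assumption, not from the original header): oop field
  // accesses are expected to go through these accessors rather than raw moves,
  // so compressed-oop encoding and GC barriers are applied, e.g. roughly
  //
  //   __ load_heap_oop(rax, Address(rdx, field_offset));
  //   __ store_heap_oop(Address(rdx, field_offset), rax, rbx, rcx, r8);
  //   __ store_heap_oop_null(Address(rdx, field_offset));  // store a null without a jobject
  //
  // where field_offset is only a placeholder for a real field offset.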
426 void store_heap_oop_null(Address dst); 427 428 void load_prototype_header(Register dst, Register src, Register tmp); 429 430 #ifdef _LP64 431 void store_klass_gap(Register dst, Register src); 432 433 // This dummy is to prevent a call to store_heap_oop from 434 // converting a zero (like null) into a Register by giving 435 // the compiler two choices it can't resolve 436 437 void store_heap_oop(Address dst, void* dummy); 438 439 void encode_heap_oop(Register r); 440 void decode_heap_oop(Register r); 441 void encode_heap_oop_not_null(Register r); 442 void decode_heap_oop_not_null(Register r); 443 void encode_heap_oop_not_null(Register dst, Register src); 444 void decode_heap_oop_not_null(Register dst, Register src); 445 446 void set_narrow_oop(Register dst, jobject obj); 447 void set_narrow_oop(Address dst, jobject obj); 448 void cmp_narrow_oop(Register dst, jobject obj); 449 void cmp_narrow_oop(Address dst, jobject obj); 450 451 void encode_klass_not_null(Register r, Register tmp); 452 void decode_klass_not_null(Register r, Register tmp); 453 void encode_and_move_klass_not_null(Register dst, Register src); 454 void decode_and_move_klass_not_null(Register dst, Register src); 455 void set_narrow_klass(Register dst, Klass* k); 456 void set_narrow_klass(Address dst, Klass* k); 457 void cmp_narrow_klass(Register dst, Klass* k); 458 void cmp_narrow_klass(Address dst, Klass* k); 459 460 // if heap base register is used - reinit it with the correct value 461 void reinit_heapbase(); 462 463 DEBUG_ONLY(void verify_heapbase(const char* msg);) 464 465 #endif // _LP64 466 467 // Int division/remainder for Java 468 // (as idivl, but checks for special case as described in JVM spec.) 469 // returns idivl instruction offset for implicit exception handling 470 int corrected_idivl(Register reg); 471 472 // Long division/remainder for Java 473 // (as idivq, but checks for special case as described in JVM spec.) 474 // returns idivq instruction offset for implicit exception handling 475 int corrected_idivq(Register reg); 476 477 void int3(); 478 479 // Long operation macros for a 32bit cpu 480 // Long negation for Java 481 void lneg(Register hi, Register lo); 482 483 // Long multiplication for Java 484 // (destroys contents of eax, ebx, ecx and edx) 485 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y 486 487 // Long shifts for Java 488 // (semantics as described in JVM spec.) 489 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) 490 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) 491 492 // Long compare for Java 493 // (semantics as described in JVM spec.) 494 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) 495 496 497 // misc 498 499 // Sign extension 500 void sign_extend_short(Register reg); 501 void sign_extend_byte(Register reg); 502 503 // Division by power of 2, rounding towards 0 504 void division_with_shift(Register reg, int shift_value); 505 506 #ifndef _LP64 507 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: 508 // 509 // CF (corresponds to C0) if x < y 510 // PF (corresponds to C2) if unordered 511 // ZF (corresponds to C3) if x = y 512 // 513 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 
514 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) 515 void fcmp(Register tmp); 516 // Variant of the above which allows y to be further down the stack 517 // and which only pops x and y if specified. If pop_right is 518 // specified then pop_left must also be specified. 519 void fcmp(Register tmp, int index, bool pop_left, bool pop_right); 520 521 // Floating-point comparison for Java 522 // Compares the top-most stack entries on the FPU stack and stores the result in dst. 523 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 524 // (semantics as described in JVM spec.) 525 void fcmp2int(Register dst, bool unordered_is_less); 526 // Variant of the above which allows y to be further down the stack 527 // and which only pops x and y if specified. If pop_right is 528 // specified then pop_left must also be specified. 529 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); 530 531 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) 532 // tmp is a temporary register, if none is available use noreg 533 void fremr(Register tmp); 534 535 // only if +VerifyFPU 536 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); 537 #endif // !LP64 538 539 // dst = c = a * b + c 540 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 541 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 542 543 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 544 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 545 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 546 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 547 548 549 // same as fcmp2int, but using SSE2 550 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 551 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 552 553 // branch to L if FPU flag C2 is set/not set 554 // tmp is a temporary register, if none is available use noreg 555 void jC2 (Register tmp, Label& L); 556 void jnC2(Register tmp, Label& L); 557 558 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into 559 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 560 void load_float(Address src); 561 562 // Store float value to 'address'. If UseSSE >= 1, the value is stored 563 // from register xmm0. Otherwise, the value is stored from the FPU stack. 564 void store_float(Address dst); 565 566 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into 567 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 568 void load_double(Address src); 569 570 // Store double value to 'address'. If UseSSE >= 2, the value is stored 571 // from register xmm0. Otherwise, the value is stored from the FPU stack. 
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/the interpreter
  // - always save/restore the FPU state as a whole
  // - do not care about the AVX-512 opmask registers
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation

  // Object / value buffer allocation...
  // Allocate instance of klass, assumes klass initialized by caller
  // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // For field "index" within "klass", return inline_klass ...
652 void get_inline_type_field_klass(Register klass, Register index, Register inline_klass); 653 654 void inline_layout_info(Register klass, Register index, Register layout_info); 655 656 void population_count(Register dst, Register src, Register scratch1, Register scratch2); 657 658 // interface method calling 659 void lookup_interface_method(Register recv_klass, 660 Register intf_klass, 661 RegisterOrConstant itable_index, 662 Register method_result, 663 Register scan_temp, 664 Label& no_such_interface, 665 bool return_method = true); 666 667 void lookup_interface_method_stub(Register recv_klass, 668 Register holder_klass, 669 Register resolved_klass, 670 Register method_result, 671 Register scan_temp, 672 Register temp_reg2, 673 Register receiver, 674 int itable_index, 675 Label& L_no_such_interface); 676 677 // virtual method calling 678 void lookup_virtual_method(Register recv_klass, 679 RegisterOrConstant vtable_index, 680 Register method_result); 681 682 // Test sub_klass against super_klass, with fast and slow paths. 683 684 // The fast path produces a tri-state answer: yes / no / maybe-slow. 685 // One of the three labels can be null, meaning take the fall-through. 686 // If super_check_offset is -1, the value is loaded up from super_klass. 687 // No registers are killed, except temp_reg. 688 void check_klass_subtype_fast_path(Register sub_klass, 689 Register super_klass, 690 Register temp_reg, 691 Label* L_success, 692 Label* L_failure, 693 Label* L_slow_path, 694 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); 695 696 // The rest of the type check; must be wired to a corresponding fast path. 697 // It does not repeat the fast path logic, so don't use it standalone. 698 // The temp_reg and temp2_reg can be noreg, if no temps are available. 699 // Updates the sub's secondary super cache as necessary. 700 // If set_cond_codes, condition codes will be Z on success, NZ on failure. 701 void check_klass_subtype_slow_path(Register sub_klass, 702 Register super_klass, 703 Register temp_reg, 704 Register temp2_reg, 705 Label* L_success, 706 Label* L_failure, 707 bool set_cond_codes = false); 708 709 #ifdef _LP64 710 // The 64-bit version, which may do a hashed subclass lookup. 711 void check_klass_subtype_slow_path(Register sub_klass, 712 Register super_klass, 713 Register temp_reg, 714 Register temp2_reg, 715 Register temp3_reg, 716 Register temp4_reg, 717 Label* L_success, 718 Label* L_failure); 719 #endif 720 721 // Three parts of a hashed subclass lookup: a simple linear search, 722 // a table lookup, and a fallback that does linear probing in the 723 // event of a hash collision. 724 void check_klass_subtype_slow_path_linear(Register sub_klass, 725 Register super_klass, 726 Register temp_reg, 727 Register temp2_reg, 728 Label* L_success, 729 Label* L_failure, 730 bool set_cond_codes = false); 731 void check_klass_subtype_slow_path_table(Register sub_klass, 732 Register super_klass, 733 Register temp_reg, 734 Register temp2_reg, 735 Register temp3_reg, 736 Register result_reg, 737 Label* L_success, 738 Label* L_failure); 739 void hashed_check_klass_subtype_slow_path(Register sub_klass, 740 Register super_klass, 741 Register temp_reg, 742 Label* L_success, 743 Label* L_failure); 744 745 // As above, but with a constant super_klass. 746 // The result is in Register result, not the condition codes. 
747 void lookup_secondary_supers_table_const(Register sub_klass, 748 Register super_klass, 749 Register temp1, 750 Register temp2, 751 Register temp3, 752 Register temp4, 753 Register result, 754 u1 super_klass_slot); 755 756 #ifdef _LP64 757 using Assembler::salq; 758 void salq(Register dest, Register count); 759 using Assembler::rorq; 760 void rorq(Register dest, Register count); 761 void lookup_secondary_supers_table_var(Register sub_klass, 762 Register super_klass, 763 Register temp1, 764 Register temp2, 765 Register temp3, 766 Register temp4, 767 Register result); 768 769 void lookup_secondary_supers_table_slow_path(Register r_super_klass, 770 Register r_array_base, 771 Register r_array_index, 772 Register r_bitmap, 773 Register temp1, 774 Register temp2, 775 Label* L_success, 776 Label* L_failure = nullptr); 777 778 void verify_secondary_supers_table(Register r_sub_klass, 779 Register r_super_klass, 780 Register expected, 781 Register temp1, 782 Register temp2, 783 Register temp3); 784 #endif 785 786 void repne_scanq(Register addr, Register value, Register count, Register limit, 787 Label* L_success, 788 Label* L_failure = nullptr); 789 790 // If r is valid, return r. 791 // If r is invalid, remove a register r2 from available_regs, add r2 792 // to regs_to_push, then return r2. 793 Register allocate_if_noreg(const Register r, 794 RegSetIterator<Register> &available_regs, 795 RegSet ®s_to_push); 796 797 // Simplified, combined version, good for typical uses. 798 // Falls through on failure. 799 void check_klass_subtype(Register sub_klass, 800 Register super_klass, 801 Register temp_reg, 802 Label& L_success); 803 804 void clinit_barrier(Register klass, 805 Register thread, 806 Label* L_fast_path = nullptr, 807 Label* L_slow_path = nullptr); 808 809 // method handles (JSR 292) 810 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); 811 812 // Debugging 813 814 // only if +VerifyOops 815 void _verify_oop(Register reg, const char* s, const char* file, int line); 816 void _verify_oop_addr(Address addr, const char* s, const char* file, int line); 817 818 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) { 819 if (VerifyOops) { 820 _verify_oop(reg, s, file, line); 821 } 822 } 823 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) { 824 if (VerifyOops) { 825 _verify_oop_addr(reg, s, file, line); 826 } 827 } 828 829 // TODO: verify method and klass metadata (compare against vptr?) 
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler.
The major exception to this rule is call 890 891 // Arithmetics 892 893 894 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } 895 void addptr(Address dst, Register src); 896 897 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } 898 void addptr(Register dst, int32_t src); 899 void addptr(Register dst, Register src); 900 void addptr(Register dst, RegisterOrConstant src) { 901 if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant())); 902 else addptr(dst, src.as_register()); 903 } 904 905 void andptr(Register dst, int32_t src); 906 void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; } 907 void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; } 908 909 #ifdef _LP64 910 using Assembler::andq; 911 void andq(Register dst, AddressLiteral src, Register rscratch = noreg); 912 #endif 913 914 void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg); 915 916 // renamed to drag out the casting of address to int32_t/intptr_t 917 void cmp32(Register src1, int32_t imm); 918 919 void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg); 920 // compare reg - mem, or reg - &mem 921 void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg); 922 923 void cmp32(Register src1, Address src2); 924 925 #ifndef _LP64 926 void cmpklass(Address dst, Metadata* obj); 927 void cmpklass(Register dst, Metadata* obj); 928 void cmpoop(Address dst, jobject obj); 929 #endif // _LP64 930 931 void cmpoop(Register src1, Register src2); 932 void cmpoop(Register src1, Address src2); 933 void cmpoop(Register dst, jobject obj, Register rscratch); 934 935 // NOTE src2 must be the lval. 
  // This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }

  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2)  { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }

  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
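  // Illustrative sketch (assumption, not from the original header): a typical
  // counter update uses one of the increment helpers below, e.g. roughly
  //
  //   __ cond_inc32(Assembler::zero, ExternalAddress((address)&some_counter));  // flags preserved
  //   __ atomic_incl(ExternalAddress((address)&some_counter), rscratch1);       // unconditional
  //
  // some_counter stands in for a real VM counter; it is only a placeholder here.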
997 void atomic_incl(Address counter_addr); 998 void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg); 999 #ifdef _LP64 1000 void atomic_incq(Address counter_addr); 1001 void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg); 1002 #endif 1003 void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; } 1004 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; } 1005 1006 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } 1007 void lea(Register dst, AddressLiteral adr); 1008 void lea(Address dst, AddressLiteral adr, Register rscratch); 1009 1010 void leal32(Register dst, Address src) { leal(dst, src); } 1011 1012 // Import other testl() methods from the parent class or else 1013 // they will be hidden by the following overriding declaration. 1014 using Assembler::testl; 1015 void testl(Address dst, int32_t imm32); 1016 void testl(Register dst, int32_t imm32); 1017 void testl(Register dst, AddressLiteral src); // requires reachable address 1018 using Assembler::testq; 1019 void testq(Address dst, int32_t imm32); 1020 void testq(Register dst, int32_t imm32); 1021 1022 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 1023 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 1024 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 1025 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } 1026 1027 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 1028 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); } 1029 void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 1030 void testptr(Register src1, Register src2); 1031 1032 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 1033 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 1034 1035 // Calls 1036 1037 void call(Label& L, relocInfo::relocType rtype); 1038 void call(Register entry); 1039 void call(Address addr) { Assembler::call(addr); } 1040 1041 // NOTE: this call transfers to the effective address of entry NOT 1042 // the address contained by entry. This is because this is more natural 1043 // for jumps/calls. 1044 void call(AddressLiteral entry, Register rscratch = rax); 1045 1046 // Emit the CompiledIC call idiom 1047 void ic_call(address entry, jint method_index = 0); 1048 static int ic_check_size(); 1049 int ic_check(int end_alignment); 1050 1051 void emit_static_call_stub(); 1052 1053 // Jumps 1054 1055 // NOTE: these jumps transfer to the effective address of dst NOT 1056 // the address contained by dst. This is because this is more natural 1057 // for jumps/calls. 1058 void jump(AddressLiteral dst, Register rscratch = noreg); 1059 1060 void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg); 1061 1062 // 32bit can do a case table jump in one instruction but we no longer allow the base 1063 // to be installed in the Address class. 
This jump will transfer to the address 1064 // contained in the location described by entry (not the address of entry) 1065 void jump(ArrayAddress entry, Register rscratch); 1066 1067 // Adding more natural conditional jump instructions 1068 void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); } 1069 void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); } 1070 void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); } 1071 void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); } 1072 void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); } 1073 void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); } 1074 void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); } 1075 void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); } 1076 void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); } 1077 void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); } 1078 void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); } 1079 void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); } 1080 void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); } 1081 void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); } 1082 void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); } 1083 void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); } 1084 void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); } 1085 void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); } 1086 void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); } 1087 void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); } 1088 void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); } 1089 void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); } 1090 void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); } 1091 void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); } 1092 void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); } 1093 void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); } 1094 void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); } 1095 void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); } 1096 void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); } 1097 void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); } 1098 // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { 
jcc(Assembler::cxz, L, maybe_short); } 1099 // * No condition for this * void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); } 1100 1101 // Short versions of the above 1102 void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); } 1103 void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); } 1104 void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); } 1105 void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); } 1106 void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); } 1107 void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); } 1108 void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); } 1109 void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); } 1110 void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); } 1111 void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); } 1112 void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); } 1113 void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); } 1114 void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); } 1115 void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); } 1116 void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); } 1117 void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); } 1118 void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); } 1119 void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); } 1120 void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); } 1121 void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); } 1122 void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); } 1123 void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); } 1124 void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); } 1125 void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); } 1126 void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); } 1127 void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); } 1128 void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); } 1129 void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); } 1130 void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); } 1131 void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); } 1132 // * No condition for this * void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); } 1133 // * No condition for this * void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); } 1134 1135 // Floating 1136 1137 void push_f(XMMRegister r); 1138 void pop_f(XMMRegister r); 1139 void push_d(XMMRegister r); 1140 void pop_d(XMMRegister r); 1141 1142 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } 1143 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } 1144 void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1145 1146 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } 1147 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } 1148 void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1149 1150 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } 1151 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } 1152 void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1153 1154 void comisd(XMMRegister dst, XMMRegister src) { 
Assembler::comisd(dst, src); } 1155 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 1156 void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1157 1158 #ifndef _LP64 1159 void fadd_s(Address src) { Assembler::fadd_s(src); } 1160 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 1161 1162 void fldcw(Address src) { Assembler::fldcw(src); } 1163 void fldcw(AddressLiteral src); 1164 1165 void fld_s(int index) { Assembler::fld_s(index); } 1166 void fld_s(Address src) { Assembler::fld_s(src); } 1167 void fld_s(AddressLiteral src); 1168 1169 void fld_d(Address src) { Assembler::fld_d(src); } 1170 void fld_d(AddressLiteral src); 1171 1172 void fld_x(Address src) { Assembler::fld_x(src); } 1173 void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); } 1174 1175 void fmul_s(Address src) { Assembler::fmul_s(src); } 1176 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 1177 #endif // !_LP64 1178 1179 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 1180 void ldmxcsr(AddressLiteral src, Register rscratch = noreg); 1181 1182 #ifdef _LP64 1183 private: 1184 void sha256_AVX2_one_round_compute( 1185 Register reg_old_h, 1186 Register reg_a, 1187 Register reg_b, 1188 Register reg_c, 1189 Register reg_d, 1190 Register reg_e, 1191 Register reg_f, 1192 Register reg_g, 1193 Register reg_h, 1194 int iter); 1195 void sha256_AVX2_four_rounds_compute_first(int start); 1196 void sha256_AVX2_four_rounds_compute_last(int start); 1197 void sha256_AVX2_one_round_and_sched( 1198 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */ 1199 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */ 1200 XMMRegister xmm_2, /* ymm6 */ 1201 XMMRegister xmm_3, /* ymm7 */ 1202 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */ 1203 Register reg_b, /* ebx */ /* full cycle is 8 iterations */ 1204 Register reg_c, /* edi */ 1205 Register reg_d, /* esi */ 1206 Register reg_e, /* r8d */ 1207 Register reg_f, /* r9d */ 1208 Register reg_g, /* r10d */ 1209 Register reg_h, /* r11d */ 1210 int iter); 1211 1212 void addm(int disp, Register r1, Register r2); 1213 1214 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d, 1215 Register e, Register f, Register g, Register h, int iteration); 1216 1217 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1218 Register a, Register b, Register c, Register d, Register e, Register f, 1219 Register g, Register h, int iteration); 1220 1221 void addmq(int disp, Register r1, Register r2); 1222 public: 1223 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1224 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1225 Register buf, Register state, Register ofs, Register limit, Register rsp, 1226 bool multi_block, XMMRegister shuf_mask); 1227 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1228 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1229 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block, 1230 XMMRegister shuf_mask); 1231 void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block); 1232 #endif // _LP64 1233 1234 void fast_md5(Register 
buf, Address state, Address ofs, Address limit, 1235 bool multi_block); 1236 1237 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0, 1238 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask, 1239 Register buf, Register state, Register ofs, Register limit, Register rsp, 1240 bool multi_block); 1241 1242 #ifdef _LP64 1243 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1244 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1245 Register buf, Register state, Register ofs, Register limit, Register rsp, 1246 bool multi_block, XMMRegister shuf_mask); 1247 #else 1248 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1249 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1250 Register buf, Register state, Register ofs, Register limit, Register rsp, 1251 bool multi_block); 1252 #endif 1253 1254 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1255 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1256 Register rax, Register rcx, Register rdx, Register tmp); 1257 1258 #ifndef _LP64 1259 private: 1260 // Initialized in macroAssembler_x86_constants.cpp 1261 static address ONES; 1262 static address L_2IL0FLOATPACKET_0; 1263 static address PI4_INV; 1264 static address PI4X3; 1265 static address PI4X4; 1266 1267 public: 1268 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1269 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1270 Register rax, Register rcx, Register rdx, Register tmp1); 1271 1272 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1273 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1274 Register rax, Register rcx, Register rdx, Register tmp); 1275 1276 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, 1277 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, 1278 Register rdx, Register tmp); 1279 1280 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1281 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1282 Register rax, Register rbx, Register rdx); 1283 1284 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1285 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1286 Register rax, Register rcx, Register rdx, Register tmp); 1287 1288 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1289 Register edx, Register ebx, Register esi, Register edi, 1290 Register ebp, Register esp); 1291 1292 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx, 1293 Register esi, Register edi, Register ebp, Register esp); 1294 1295 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1296 Register edx, Register ebx, Register esi, Register edi, 1297 Register ebp, Register esp); 1298 1299 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1300 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1301 Register rax, Register rcx, Register rdx, Register tmp); 1302 #endif // !_LP64 1303 1304 private: 1305 1306 // these are private because users should be doing movflt/movdbl 1307 1308 
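  // Illustrative sketch (assumption, not from the original header): generated
  // code should prefer the public movflt/movdbl wrappers above, which pick
  // movaps/movss (resp. movapd/movsd/movlpd) based on the UseXmm* flags, e.g.
  //
  //   __ movflt(xmm0, Address(rsp, 0));   // rather than a raw movss
  //   __ movdbl(Address(rsp, 8), xmm1);   // rather than a raw movsd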
void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } 1309 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } 1310 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 1311 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1312 1313 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 1314 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1315 1316 public: 1317 1318 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 1319 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 1320 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1321 1322 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 1323 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 1324 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1325 1326 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } 1327 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } 1328 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1329 1330 using Assembler::vbroadcasti128; 1331 void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1332 1333 using Assembler::vbroadcastsd; 1334 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1335 1336 using Assembler::vbroadcastss; 1337 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1338 1339 // Vector float blend 1340 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1341 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1342 1343 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 1344 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 1345 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1346 1347 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 1348 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 1349 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1350 1351 // Move Unaligned Double Quadword 1352 void movdqu(Address dst, XMMRegister src); 1353 void movdqu(XMMRegister dst, XMMRegister src); 1354 void movdqu(XMMRegister dst, Address src); 1355 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1356 1357 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } 1358 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } 1359 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } 1360 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } 1361 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } 1362 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1363 1364 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } 1365 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } 1366 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } 1367 void kmovql(KRegister dst, 
Address src) { Assembler::kmovql(dst, src); } 1368 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } 1369 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1370 1371 // Safe move operation, lowers down to 16bit moves for targets supporting 1372 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. 1373 void kmov(Address dst, KRegister src); 1374 void kmov(KRegister dst, Address src); 1375 void kmov(KRegister dst, KRegister src); 1376 void kmov(Register dst, KRegister src); 1377 void kmov(KRegister dst, Register src); 1378 1379 using Assembler::movddup; 1380 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1381 1382 using Assembler::vmovddup; 1383 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1384 1385 // AVX Unaligned forms 1386 void vmovdqu(Address dst, XMMRegister src); 1387 void vmovdqu(XMMRegister dst, Address src); 1388 void vmovdqu(XMMRegister dst, XMMRegister src); 1389 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1390 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1391 1392 // AVX512 Unaligned 1393 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); 1394 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len); 1395 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len); 1396 1397 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1398 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1399 1400 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1401 if (dst->encoding() != src->encoding() || mask != k0) { 1402 Assembler::evmovdqub(dst, mask, src, merge, vector_len); 1403 } 1404 } 1405 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1406 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1407 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1408 1409 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1410 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1411 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1412 1413 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1414 if (dst->encoding() != src->encoding() || mask != k0) { 1415 Assembler::evmovdquw(dst, mask, src, merge, vector_len); 1416 } 1417 } 1418 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1419 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1420 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1421 1422 void 
evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1423 if (dst->encoding() != src->encoding()) { 1424 Assembler::evmovdqul(dst, src, vector_len); 1425 } 1426 } 1427 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1428 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1429 1430 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1431 if (dst->encoding() != src->encoding() || mask != k0) { 1432 Assembler::evmovdqul(dst, mask, src, merge, vector_len); 1433 } 1434 } 1435 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1436 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1437 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1438 1439 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 1440 if (dst->encoding() != src->encoding()) { 1441 Assembler::evmovdquq(dst, src, vector_len); 1442 } 1443 } 1444 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1445 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1446 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1447 1448 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1449 if (dst->encoding() != src->encoding() || mask != k0) { 1450 Assembler::evmovdquq(dst, mask, src, merge, vector_len); 1451 } 1452 } 1453 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1454 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1455 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1456 1457 // Move Aligned Double Quadword 1458 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1459 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1460 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1461 1462 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1463 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1464 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1465 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1466 1467 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } 1468 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1469 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1470 1471 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1472 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1473 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1474 1475 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 1476 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1477 
void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1478 1479 // Carry-Less Multiplication Quadword 1480 void pclmulldq(XMMRegister dst, XMMRegister src) { 1481 // 0x00 - multiply lower 64 bits [0:63] 1482 Assembler::pclmulqdq(dst, src, 0x00); 1483 } 1484 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1485 // 0x11 - multiply upper 64 bits [64:127] 1486 Assembler::pclmulqdq(dst, src, 0x11); 1487 } 1488 1489 void pcmpeqb(XMMRegister dst, XMMRegister src); 1490 void pcmpeqw(XMMRegister dst, XMMRegister src); 1491 1492 void pcmpestri(XMMRegister dst, Address src, int imm8); 1493 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1494 1495 void pmovzxbw(XMMRegister dst, XMMRegister src); 1496 void pmovzxbw(XMMRegister dst, Address src); 1497 1498 void pmovmskb(Register dst, XMMRegister src); 1499 1500 void ptest(XMMRegister dst, XMMRegister src); 1501 1502 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1503 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1504 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1505 1506 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1507 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1508 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1509 1510 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1511 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1512 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1513 1514 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1515 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1516 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1517 1518 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1519 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1520 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1521 1522 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1523 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1524 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1525 1526 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1527 void xorpd(XMMRegister dst, XMMRegister src); 1528 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1529 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1530 1531 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1532 void xorps(XMMRegister dst, XMMRegister src); 1533 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1534 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1535 1536 // Shuffle Bytes 1537 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1538 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1539 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1540 // AVX 3-operands instructions 1541 1542 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1543 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, 
nds, src); } 1544 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1545 1546 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1547 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1548 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1549 1550 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1551 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1552 1553 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1554 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1555 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1556 1557 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1558 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1559 1560 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1561 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1562 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1563 1564 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1565 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1566 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1567 1568 using Assembler::vpbroadcastd; 1569 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1570 1571 using Assembler::vpbroadcastq; 1572 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1573 1574 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1575 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len); 1576 1577 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1578 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1579 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1580 1581 // Vector compares 1582 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1583 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1584 } 1585 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1586 1587 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1588 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1589 } 1590 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1591 1592 void evpcmpb(KRegister kdst, 
KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1593 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1594 } 1595 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1596 1597 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1598 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); 1599 } 1600 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1601 1602 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1603 1604 // Emit comparison instruction for the specified comparison predicate. 1605 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1606 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1607 1608 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1609 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1610 1611 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1612 1613 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1614 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1615 1616 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1617 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1618 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1619 1620 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); } 1621 1622 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1623 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1624 1625 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1626 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1627 1628 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1629 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1630 1631 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1632 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1633 1634 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1635 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1636 1637 using Assembler::evpsllw; 1638 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1639 if (!is_varshift) { 1640 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1641 } else { 1642 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1643 } 1644 } 1645 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1646 if (!is_varshift) { 1647 
Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1648 } else { 1649 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1650 } 1651 } 1652 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1653 if (!is_varshift) { 1654 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1655 } else { 1656 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1657 } 1658 } 1659 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1660 if (!is_varshift) { 1661 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1662 } else { 1663 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1664 } 1665 } 1666 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1667 if (!is_varshift) { 1668 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1669 } else { 1670 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1671 } 1672 } 1673 1674 using Assembler::evpsrlq; 1675 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1676 if (!is_varshift) { 1677 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1678 } else { 1679 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1680 } 1681 } 1682 using Assembler::evpsraw; 1683 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1684 if (!is_varshift) { 1685 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1686 } else { 1687 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1688 } 1689 } 1690 using Assembler::evpsrad; 1691 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1692 if (!is_varshift) { 1693 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1694 } else { 1695 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1696 } 1697 } 1698 using Assembler::evpsraq; 1699 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1700 if (!is_varshift) { 1701 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1702 } else { 1703 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1704 } 1705 } 1706 1707 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1708 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1709 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1710 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1711 1712 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1713 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1714 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1715 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1716 1717 void 
vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1718 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1719 1720 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1721 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1722 1723 void vptest(XMMRegister dst, XMMRegister src); 1724 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1725 1726 void punpcklbw(XMMRegister dst, XMMRegister src); 1727 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1728 1729 void pshufd(XMMRegister dst, Address src, int mode); 1730 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1731 1732 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1733 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1734 1735 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1736 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1737 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1738 1739 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1740 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1741 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1742 1743 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1744 1745 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1746 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1747 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1748 1749 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1750 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1751 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1752 1753 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1754 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1755 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1756 1757 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1758 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1759 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1760 1761 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1762 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1763 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1764 1765 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, 
src); } 1766 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1767 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1768 1769 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1770 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1771 1772 // AVX Vector instructions 1773 1774 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1775 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1776 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1777 1778 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1779 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1780 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1781 1782 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1783 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1784 Assembler::vpxor(dst, nds, src, vector_len); 1785 else 1786 Assembler::vxorpd(dst, nds, src, vector_len); 1787 } 1788 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1789 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1790 Assembler::vpxor(dst, nds, src, vector_len); 1791 else 1792 Assembler::vxorpd(dst, nds, src, vector_len); 1793 } 1794 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1795 1796 // Simple version for AVX2 256bit vectors 1797 void vpxor(XMMRegister dst, XMMRegister src) { 1798 assert(UseAVX >= 2, "Should be at least AVX2"); 1799 Assembler::vpxor(dst, dst, src, AVX_256bit); 1800 } 1801 void vpxor(XMMRegister dst, Address src) { 1802 assert(UseAVX >= 2, "Should be at least AVX2"); 1803 Assembler::vpxor(dst, dst, src, AVX_256bit); 1804 } 1805 1806 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1807 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1808 1809 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1810 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1811 Assembler::vinserti32x4(dst, nds, src, imm8); 1812 } else if (UseAVX > 1) { 1813 // vinserti128 is available only in AVX2 1814 Assembler::vinserti128(dst, nds, src, imm8); 1815 } else { 1816 Assembler::vinsertf128(dst, nds, src, imm8); 1817 } 1818 } 1819 1820 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1821 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1822 Assembler::vinserti32x4(dst, nds, src, imm8); 1823 } else if (UseAVX > 1) { 1824 // vinserti128 is available only in AVX2 1825 Assembler::vinserti128(dst, nds, src, imm8); 1826 } else { 1827 Assembler::vinsertf128(dst, nds, src, imm8); 1828 } 1829 } 1830 1831 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1832 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1833 Assembler::vextracti32x4(dst, src, imm8); 1834 } else if 
(UseAVX > 1) { 1835 // vextracti128 is available only in AVX2 1836 Assembler::vextracti128(dst, src, imm8); 1837 } else { 1838 Assembler::vextractf128(dst, src, imm8); 1839 } 1840 } 1841 1842 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1843 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1844 Assembler::vextracti32x4(dst, src, imm8); 1845 } else if (UseAVX > 1) { 1846 // vextracti128 is available only in AVX2 1847 Assembler::vextracti128(dst, src, imm8); 1848 } else { 1849 Assembler::vextractf128(dst, src, imm8); 1850 } 1851 } 1852 1853 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1854 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1855 vinserti128(dst, dst, src, 1); 1856 } 1857 void vinserti128_high(XMMRegister dst, Address src) { 1858 vinserti128(dst, dst, src, 1); 1859 } 1860 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1861 vextracti128(dst, src, 1); 1862 } 1863 void vextracti128_high(Address dst, XMMRegister src) { 1864 vextracti128(dst, src, 1); 1865 } 1866 1867 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1868 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1869 Assembler::vinsertf32x4(dst, dst, src, 1); 1870 } else { 1871 Assembler::vinsertf128(dst, dst, src, 1); 1872 } 1873 } 1874 1875 void vinsertf128_high(XMMRegister dst, Address src) { 1876 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1877 Assembler::vinsertf32x4(dst, dst, src, 1); 1878 } else { 1879 Assembler::vinsertf128(dst, dst, src, 1); 1880 } 1881 } 1882 1883 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1884 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1885 Assembler::vextractf32x4(dst, src, 1); 1886 } else { 1887 Assembler::vextractf128(dst, src, 1); 1888 } 1889 } 1890 1891 void vextractf128_high(Address dst, XMMRegister src) { 1892 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1893 Assembler::vextractf32x4(dst, src, 1); 1894 } else { 1895 Assembler::vextractf128(dst, src, 1); 1896 } 1897 } 1898 1899 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1900 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1901 Assembler::vinserti64x4(dst, dst, src, 1); 1902 } 1903 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1904 Assembler::vinsertf64x4(dst, dst, src, 1); 1905 } 1906 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1907 Assembler::vextracti64x4(dst, src, 1); 1908 } 1909 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1910 Assembler::vextractf64x4(dst, src, 1); 1911 } 1912 void vextractf64x4_high(Address dst, XMMRegister src) { 1913 Assembler::vextractf64x4(dst, src, 1); 1914 } 1915 void vinsertf64x4_high(XMMRegister dst, Address src) { 1916 Assembler::vinsertf64x4(dst, dst, src, 1); 1917 } 1918 1919 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1920 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1921 vinserti128(dst, dst, src, 0); 1922 } 1923 void vinserti128_low(XMMRegister dst, Address src) { 1924 vinserti128(dst, dst, src, 0); 1925 } 1926 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1927 vextracti128(dst, src, 0); 1928 } 1929 void vextracti128_low(Address dst, XMMRegister src) { 1930 vextracti128(dst, src, 0); 1931 } 1932 1933 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1934 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1935 Assembler::vinsertf32x4(dst, dst, src, 0); 1936 } else { 1937 Assembler::vinsertf128(dst, dst, src, 0); 1938 } 1939 } 1940 1941 
void vinsertf128_low(XMMRegister dst, Address src) { 1942 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1943 Assembler::vinsertf32x4(dst, dst, src, 0); 1944 } else { 1945 Assembler::vinsertf128(dst, dst, src, 0); 1946 } 1947 } 1948 1949 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1950 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1951 Assembler::vextractf32x4(dst, src, 0); 1952 } else { 1953 Assembler::vextractf128(dst, src, 0); 1954 } 1955 } 1956 1957 void vextractf128_low(Address dst, XMMRegister src) { 1958 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1959 Assembler::vextractf32x4(dst, src, 0); 1960 } else { 1961 Assembler::vextractf128(dst, src, 0); 1962 } 1963 } 1964 1965 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1966 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1967 Assembler::vinserti64x4(dst, dst, src, 0); 1968 } 1969 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1970 Assembler::vinsertf64x4(dst, dst, src, 0); 1971 } 1972 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1973 Assembler::vextracti64x4(dst, src, 0); 1974 } 1975 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1976 Assembler::vextractf64x4(dst, src, 0); 1977 } 1978 void vextractf64x4_low(Address dst, XMMRegister src) { 1979 Assembler::vextractf64x4(dst, src, 0); 1980 } 1981 void vinsertf64x4_low(XMMRegister dst, Address src) { 1982 Assembler::vinsertf64x4(dst, dst, src, 0); 1983 } 1984 1985 // Carry-Less Multiplication Quadword 1986 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1987 // 0x00 - multiply lower 64 bits [0:63] 1988 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1989 } 1990 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1991 // 0x11 - multiply upper 64 bits [64:127] 1992 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1993 } 1994 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1995 // 0x10 - multiply nds[0:63] and src[64:127] 1996 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1997 } 1998 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1999 //0x01 - multiply nds[64:127] and src[0:63] 2000 Assembler::vpclmulqdq(dst, nds, src, 0x01); 2001 } 2002 2003 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2004 // 0x00 - multiply lower 64 bits [0:63] 2005 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 2006 } 2007 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2008 // 0x11 - multiply upper 64 bits [64:127] 2009 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 2010 } 2011 2012 // AVX-512 mask operations. 
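  // Illustrative sketch, not taken from the upstream sources: these wrappers choose a
  // width-appropriate opmask instruction from the element type or mask length, so generators
  // can stay generic over vector shape. Assuming the customary '#define __ masm->' shorthand
  // and hypothetical registers and label:
  //
  //   __ kand(T_INT, k1, k2, k3);               // k1 = k2 & k3 for an int-element mask
  //   __ kortest(16, k1, k1);                   // OR the 16-bit mask with itself to set flags
  //   __ jcc(Assembler::zero, L_no_lanes_set);  // taken when every mask bit is clear
  //
  // without hard-coding a width-specific kand*/kortest* encoding at each call site.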
2013 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 2014 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 2015 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 2016 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 2017 void kortest(uint masklen, KRegister src1, KRegister src2); 2018 void ktest(uint masklen, KRegister src1, KRegister src2); 2019 2020 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2021 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2022 2023 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2024 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2025 2026 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2027 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2028 2029 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2030 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2031 2032 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 2033 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 2034 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 2035 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 2036 2037 using Assembler::evpandq; 2038 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2039 2040 using Assembler::evpaddq; 2041 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 2042 2043 using Assembler::evporq; 2044 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2045 2046 using Assembler::vpshufb; 2047 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2048 2049 using Assembler::vpor; 2050 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2051 2052 using Assembler::vpternlogq; 2053 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 2054 2055 void cmov32( Condition cc, Register dst, Address src); 2056 void cmov32( Condition cc, Register dst, Register src); 2057 2058 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 2059 2060 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2061 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2062 2063 void movoop(Register dst, jobject obj); 2064 void movoop(Address dst, jobject obj, Register rscratch); 2065 2066 
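  // Illustrative sketch, not taken from the upstream sources: the ptr-suffixed helpers in this
  // area (cmovptr() above, movptr()/pushptr()/popptr() below) expand to the q-forms on LP64 and
  // the 32-bit forms otherwise, so pointer-sized code can be written once for both word sizes.
  // Assuming the customary '#define __ masm->' shorthand and hypothetical registers:
  //
  //   __ movptr(rax, Address(rsp, 0));          // load a pointer-sized stack slot
  //   __ cmpptr(rax, rcx);
  //   __ cmovptr(Assembler::above, rax, rcx);   // rax = unsigned minimum of rax and rcx
  //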
void mov_metadata(Register dst, Metadata* obj); 2067 void mov_metadata(Address dst, Metadata* obj, Register rscratch); 2068 2069 void movptr(Register dst, Register src); 2070 void movptr(Register dst, Address src); 2071 void movptr(Register dst, AddressLiteral src); 2072 void movptr(Register dst, ArrayAddress src); 2073 void movptr(Register dst, intptr_t src); 2074 void movptr(Address dst, Register src); 2075 void movptr(Address dst, int32_t imm); 2076 void movptr(Address dst, intptr_t src, Register rscratch); 2077 void movptr(ArrayAddress dst, Register src, Register rscratch); 2078 2079 void movptr(Register dst, RegisterOrConstant src) { 2080 if (src.is_constant()) movptr(dst, src.as_constant()); 2081 else movptr(dst, src.as_register()); 2082 } 2083 2084 2085 // to avoid hiding movl 2086 void mov32(Register dst, AddressLiteral src); 2087 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 2088 2089 // Import other mov() methods from the parent class or else 2090 // they will be hidden by the following overriding declaration. 2091 using Assembler::movdl; 2092 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 2093 2094 using Assembler::movq; 2095 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 2096 2097 // Can push value or effective address 2098 void pushptr(AddressLiteral src, Register rscratch); 2099 2100 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 2101 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 2102 2103 void pushoop(jobject obj, Register rscratch); 2104 void pushklass(Metadata* obj, Register rscratch); 2105 2106 // sign extend as need a l to ptr sized element 2107 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 2108 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 2109 2110 2111 public: 2112 // Inline type specific methods 2113 #include "asm/macroAssembler_common.hpp" 2114 2115 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true); 2116 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]); 2117 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 2118 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 2119 RegState reg_state[]); 2120 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 2121 VMRegPair* from, int from_count, int& from_index, VMReg to, 2122 RegState reg_state[], Register val_array); 2123 int extend_stack_for_inline_args(int args_on_stack); 2124 void remove_frame(int initial_framesize, bool needs_stack_repair); 2125 VMReg spill_reg_for(VMReg reg); 2126 2127 // clear memory of size 'cnt' qwords, starting at 'base'; 2128 // if 'is_large' is set, do not try to produce short loop 2129 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg); 2130 2131 // clear memory initialization sequence for constant size; 2132 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 2133 2134 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 2135 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 2136 2137 // Fill primitive arrays 2138 void generate_fill(BasicType t, bool aligned, 2139 Register to, Register 
value, Register count, 2140 Register rtmp, XMMRegister xtmp); 2141 2142 void encode_iso_array(Register src, Register dst, Register len, 2143 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2144 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 2145 2146 #ifdef _LP64 2147 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 2148 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 2149 Register y, Register y_idx, Register z, 2150 Register carry, Register product, 2151 Register idx, Register kdx); 2152 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 2153 Register yz_idx, Register idx, 2154 Register carry, Register product, int offset); 2155 void multiply_128_x_128_bmi2_loop(Register y, Register z, 2156 Register carry, Register carry2, 2157 Register idx, Register jdx, 2158 Register yz_idx1, Register yz_idx2, 2159 Register tmp, Register tmp3, Register tmp4); 2160 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 2161 Register yz_idx, Register idx, Register jdx, 2162 Register carry, Register product, 2163 Register carry2); 2164 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 2165 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 2166 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 2167 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2168 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 2169 Register tmp2); 2170 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 2171 Register rdxReg, Register raxReg); 2172 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 2173 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2174 Register tmp3, Register tmp4); 2175 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2176 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2177 2178 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 2179 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2180 Register raxReg); 2181 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 2182 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2183 Register raxReg); 2184 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 2185 Register result, Register tmp1, Register tmp2, 2186 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 2187 #endif 2188 2189 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
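  // Illustrative sketch, not taken from the upstream sources: a CRC32 stub typically reduces to
  // a single call such as the following (register names follow the declared parameters, the
  // '#define __ masm->' shorthand is assumed):
  //
  //   __ kernel_crc32(crc, buf, len, table, tmp);   // crc = CRC-32 of len bytes at buf
  //
  // with the 128-bit folding and byte-granular helpers declared below handling the bulk of the
  // buffer and its tail.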
2190 void update_byte_crc32(Register crc, Register val, Register table); 2191 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 2192 2193 2194 #ifdef _LP64 2195 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 2196 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 2197 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 2198 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 2199 #endif // _LP64 2200 2201 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 2202 // Note on a naming convention: 2203 // Prefix w = register only used on a Westmere+ architecture 2204 // Prefix n = register only used on a Nehalem architecture 2205 #ifdef _LP64 2206 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2207 Register tmp1, Register tmp2, Register tmp3); 2208 #else 2209 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2210 Register tmp1, Register tmp2, Register tmp3, 2211 XMMRegister xtmp1, XMMRegister xtmp2); 2212 #endif 2213 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 2214 Register in_out, 2215 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 2216 XMMRegister w_xtmp2, 2217 Register tmp1, 2218 Register n_tmp2, Register n_tmp3); 2219 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 2220 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2221 Register tmp1, Register tmp2, 2222 Register n_tmp3); 2223 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 2224 Register in_out1, Register in_out2, Register in_out3, 2225 Register tmp1, Register tmp2, Register tmp3, 2226 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2227 Register tmp4, Register tmp5, 2228 Register n_tmp6); 2229 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 2230 Register tmp1, Register tmp2, Register tmp3, 2231 Register tmp4, Register tmp5, Register tmp6, 2232 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2233 bool is_pclmulqdq_supported); 2234 // Fold 128-bit data chunk 2235 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2236 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 2237 #ifdef _LP64 2238 // Fold 512-bit data chunk 2239 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2240 #endif // _LP64 2241 // Fold 8-bit data 2242 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2243 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2244 2245 // Compress char[] array to byte[]. 2246 void char_array_compress(Register src, Register dst, Register len, 2247 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2248 XMMRegister tmp4, Register tmp5, Register result, 2249 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2250 2251 // Inflate byte[] array to char[]. 
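  // Illustrative sketch, not taken from the upstream sources: byte_array_inflate() below
  // zero-extends each Latin-1 byte to a UTF-16 char and, together with char_array_compress()
  // above, backs the string compress/inflate intrinsics. Assuming the customary
  // '#define __ masm->' shorthand and hypothetical registers:
  //
  //   __ byte_array_inflate(src, dst, len, xtmp1, tmp2);   // dst[i] = (jchar)(src[i] & 0xff), i < len
  //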
2252 void byte_array_inflate(Register src, Register dst, Register len, 2253 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 2254 2255 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 2256 Register length, Register temp, int vec_enc); 2257 2258 void fill64_masked(uint shift, Register dst, int disp, 2259 XMMRegister xmm, KRegister mask, Register length, 2260 Register temp, bool use64byteVector = false); 2261 2262 void fill32_masked(uint shift, Register dst, int disp, 2263 XMMRegister xmm, KRegister mask, Register length, 2264 Register temp); 2265 2266 void fill32(Address dst, XMMRegister xmm); 2267 2268 void fill32(Register dst, int disp, XMMRegister xmm); 2269 2270 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 2271 2272 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false); 2273 2274 #ifdef _LP64 2275 void convert_f2i(Register dst, XMMRegister src); 2276 void convert_d2i(Register dst, XMMRegister src); 2277 void convert_f2l(Register dst, XMMRegister src); 2278 void convert_d2l(Register dst, XMMRegister src); 2279 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx); 2280 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx); 2281 2282 void cache_wb(Address line); 2283 void cache_wbsync(bool is_pre); 2284 2285 #ifdef COMPILER2_OR_JVMCI 2286 void generate_fill_avx3(BasicType type, Register to, Register value, 2287 Register count, Register rtmp, XMMRegister xtmp); 2288 #endif // COMPILER2_OR_JVMCI 2289 #endif // _LP64 2290 2291 void vallones(XMMRegister dst, int vector_len); 2292 2293 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); 2294 2295 void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2296 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2297 2298 #ifdef _LP64 2299 void save_legacy_gprs(); 2300 void restore_legacy_gprs(); 2301 void setcc(Assembler::Condition comparison, Register dst); 2302 #endif 2303 }; 2304 2305 #endif // CPU_X86_MACROASSEMBLER_X86_HPP