/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

class ciInlineKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int number_of_arguments       // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address entry_point,          // the entry point
    int number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool check_exceptions         // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // markWord tests, kills markWord reg
  void test_markword_is_inline_type(Register markword, Label& is_inline_type);

  // inlineKlass queries, kills temp_reg
  void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
  void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
  void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);

  // Get the default value oop for the given InlineKlass
  void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
  // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
  // get_default_value_oop with extra assertion for empty inline klass
  void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);

  void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
  void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
  void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
  void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);

  // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
  void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
  void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
  void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);

  // Check array klass layout helper for flat or null-free arrays...
  void test_flat_array_layout(Register lh, Label& is_flat_array);
  void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
           (op == 0x8D) /* lea */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7 || op == 0x8D) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
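  //
  // Illustrative sketch (not part of this interface): a typical call_VM use from interpreter
  // or stub code, assuming the usual '__' shorthand for a MacroAssembler* and a VM entry such
  // as InterpreterRuntime::create_exception(JavaThread*, char*, char*). The thread argument is
  // supplied implicitly; only the extra arguments are passed here, and the oop result (if any)
  // ends up in the given result register:
  //
  //   __ call_VM(rax,                                                          // oop result
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
  //              rarg1, rarg2);                                                // hypothetical argument registers
  //
  // Pass check_exceptions = false for calls that must not check for pending exceptions on return.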

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register rscratch);

#ifdef _LP64
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);
#endif

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations

  // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
  void load_metadata(Register dst, Register src);
#ifdef _LP64
  void load_narrow_klass_compact(Register dst, Register src);
#endif
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  // Compares the Klass pointer of an object to a given Klass (which might be narrow,
  // depending on UseCompressedClassPointers).
  void cmp_klass(Register klass, Register obj, Register tmp);

  // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
  // Uses tmp1 and tmp2 as temporary registers.
  void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);

  // inline type data payload offsets...
  void payload_offset(Register inline_klass, Register offset);
  void payload_addr(Register oop, Register data, Register inline_klass);
  // get the data payload ptr of a flat value array at index, kills rcx and index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);
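
  // Illustrative sketch (not part of this interface): storing an oop field through the
  // barrier-aware helpers above, assuming '__' is a MacroAssembler* and obj/val already hold
  // the receiver and the value oop. A real oop goes through store_heap_oop so GC barriers are
  // applied; a constant null uses the dedicated helper. 'field_offset' is a hypothetical offset:
  //
  //   __ store_heap_oop(Address(obj, field_offset), val, tmp1, tmp2, tmp3);
  //   __ store_heap_oop_null(Address(obj, field_offset));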

  void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - we save/restore FPU state as a whole always
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation

  // Object / value buffer allocation...
  // Allocate instance of klass, assumes klass initialized by caller
  // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, return allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register thread,              // Current thread
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                  // temp register
    Register t2,                  // temp register
    Label& slow_case              // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // For field "index" within "klass", return inline_klass ...
  void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);

  void inline_layout_info(Register klass, Register index, Register layout_info);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

#ifdef _LP64
  // The 64-bit version, which may do a hashed subclass lookup.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);
#endif

  // Three parts of a hashed subclass lookup: a simple linear search,
  // a table lookup, and a fallback that does linear probing in the
  // event of a hash collision.
  void check_klass_subtype_slow_path_linear(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);
  void check_klass_subtype_slow_path_table(Register sub_klass,
                                           Register super_klass,
                                           Register temp_reg,
                                           Register temp2_reg,
                                           Register temp3_reg,
                                           Register result_reg,
                                           Label* L_success,
                                           Label* L_failure);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Label* L_success,
                                            Label* L_failure);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table_const(Register sub_klass,
                                           Register super_klass,
                                           Register temp1,
                                           Register temp2,
                                           Register temp3,
                                           Register temp4,
                                           Register result,
                                           u1 super_klass_slot);
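
  // Illustrative sketch (not part of this interface): how the fast and slow subtype-check
  // paths above are typically wired together, assuming '__' is a MacroAssembler* and that
  // sub/super/tmp registers are already loaded. Passing a null label means "fall through"
  // for that outcome, so the slow path directly continues the undecided case:
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(sub, super, tmp, &L_success, &L_failure, nullptr);
  //   // fall-through: the fast path could not decide, finish on the slow path
  //   __ check_klass_subtype_slow_path(sub, super, tmp, noreg, &L_success, nullptr);
  //   __ bind(L_failure);   // not a subtype
  //   ...
  //   __ bind(L_success);   // is a subtype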

#ifdef _LP64
  using Assembler::salq;
  void salq(Register dest, Register count);
  using Assembler::rorq;
  void rorq(Register dest, Register count);
  void lookup_secondary_supers_table_var(Register sub_klass,
                                         Register super_klass,
                                         Register temp1,
                                         Register temp2,
                                         Register temp3,
                                         Register temp4,
                                         Register result);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);
#endif

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);

  // If r is valid, return r.
  // If r is invalid, remove a register r2 from available_regs, add r2
  // to regs_to_push, then return r2.
  Register allocate_if_noreg(const Register r,
                             RegSetIterator<Register> &available_regs,
                             RegSet &regs_to_push);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
  void andptr(Register dst, Address src)  { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src)  { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
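
  // Illustrative sketch (not part of this interface): bumping a profiling counter at a fixed
  // address, assuming '__' is a MacroAssembler* and 'counter_addr' is an ExternalAddress
  // wrapping some int counter; 'rscratch1' stands for whatever scratch register is free.
  // The conditional form preserves the flags, the unconditional form is a plain atomic add:
  //
  //   __ cond_inc32(Assembler::equal, counter_addr);   // count only when ZF is set
  //   __ atomic_incl(counter_addr, rscratch1);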

  using Assembler::lea;
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32)  { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src)  { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
This jump will transfer to the address 1085 // contained in the location described by entry (not the address of entry) 1086 void jump(ArrayAddress entry, Register rscratch); 1087 1088 // Adding more natural conditional jump instructions 1089 void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); } 1090 void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); } 1091 void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); } 1092 void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); } 1093 void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); } 1094 void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); } 1095 void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); } 1096 void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); } 1097 void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); } 1098 void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); } 1099 void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); } 1100 void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); } 1101 void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); } 1102 void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); } 1103 void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); } 1104 void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); } 1105 void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); } 1106 void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); } 1107 void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); } 1108 void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); } 1109 void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); } 1110 void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); } 1111 void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); } 1112 void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); } 1113 void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); } 1114 void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); } 1115 void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); } 1116 void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); } 1117 void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); } 1118 void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); } 1119 // * No condition for this * void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { 
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src)        { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch = noreg);
  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,   /* ymm6 */
    XMMRegister xmm_3,   /* ymm7 */
    Register reg_a,      /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register reg_b,      /* ebx */  /* full cycle is 8 iterations */
    Register reg_c,      /* edi */
    Register reg_d,      /* esi */
    Register reg_e,      /* r8d */
    Register reg_f,      /* r9d */
    Register reg_g,      /* r10d */
    Register reg_h,      /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
  void sha512_update_ni_x1(Register arg_hash, Register arg_msg, Register ofs, Register limit, bool multi_block);
Register limit, bool multi_block); 1254 #endif // _LP64 1255 1256 void fast_md5(Register buf, Address state, Address ofs, Address limit, 1257 bool multi_block); 1258 1259 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0, 1260 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask, 1261 Register buf, Register state, Register ofs, Register limit, Register rsp, 1262 bool multi_block); 1263 1264 #ifdef _LP64 1265 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1266 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1267 Register buf, Register state, Register ofs, Register limit, Register rsp, 1268 bool multi_block, XMMRegister shuf_mask); 1269 #else 1270 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1271 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1272 Register buf, Register state, Register ofs, Register limit, Register rsp, 1273 bool multi_block); 1274 #endif 1275 1276 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1277 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1278 Register rax, Register rcx, Register rdx, Register tmp); 1279 1280 #ifndef _LP64 1281 private: 1282 // Initialized in macroAssembler_x86_constants.cpp 1283 static address ONES; 1284 static address L_2IL0FLOATPACKET_0; 1285 static address PI4_INV; 1286 static address PI4X3; 1287 static address PI4X4; 1288 1289 public: 1290 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1291 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1292 Register rax, Register rcx, Register rdx, Register tmp1); 1293 1294 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1295 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1296 Register rax, Register rcx, Register rdx, Register tmp); 1297 1298 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, 1299 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, 1300 Register rdx, Register tmp); 1301 1302 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1303 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1304 Register rax, Register rbx, Register rdx); 1305 1306 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1307 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1308 Register rax, Register rcx, Register rdx, Register tmp); 1309 1310 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1311 Register edx, Register ebx, Register esi, Register edi, 1312 Register ebp, Register esp); 1313 1314 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx, 1315 Register esi, Register edi, Register ebp, Register esp); 1316 1317 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1318 Register edx, Register ebx, Register esi, Register edi, 1319 Register ebp, Register esp); 1320 1321 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1322 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1323 Register rax, Register rcx, Register rdx, Register tmp); 1324 #endif // !_LP64 1325 1326 
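  // -------------------------------------------------------------------------
  // Illustrative note, not part of the original header: most of the wrapper
  // declarations that follow add an AddressLiteral overload with an optional
  // scratch register (rscratch = noreg by default).  The idea, implemented in
  // macroAssembler_x86.cpp, is that a literal which cannot be reached with
  // RIP-relative addressing on 64-bit targets is first materialized into the
  // scratch register and the instruction is then issued against that register.
  // A minimal sketch of the resulting call shape, assuming a constant `one`
  // defined by the caller, rscratch1 being free, and the usual `__` shorthand
  // for the current MacroAssembler:
  //
  //   static const double one = 1.0;
  //   __ ucomisd(xmm0, ExternalAddress((address)&one), rscratch1);
  //
  // This records the calling convention only; the reachability test and the
  // scratch-register handling are not shown here.
  // -------------------------------------------------------------------------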
private: 1327 1328 // these are private because users should be doing movflt/movdbl 1329 1330 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } 1331 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } 1332 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 1333 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1334 1335 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 1336 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1337 1338 public: 1339 1340 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 1341 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 1342 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1343 1344 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 1345 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 1346 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1347 1348 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } 1349 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } 1350 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1351 1352 using Assembler::vbroadcasti128; 1353 void vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1354 1355 using Assembler::vbroadcastsd; 1356 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1357 1358 using Assembler::vbroadcastss; 1359 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1360 1361 // Vector float blend 1362 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1363 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1364 1365 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 1366 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 1367 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1368 1369 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 1370 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 1371 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1372 1373 // Move Unaligned Double Quadword 1374 void movdqu(Address dst, XMMRegister src); 1375 void movdqu(XMMRegister dst, XMMRegister src); 1376 void movdqu(XMMRegister dst, Address src); 1377 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1378 1379 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } 1380 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } 1381 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } 1382 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } 1383 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } 1384 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1385 1386 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } 1387 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } 1388 void 
kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } 1389 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } 1390 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } 1391 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1392 1393 // Safe move operation, lowers down to 16bit moves for targets supporting 1394 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. 1395 void kmov(Address dst, KRegister src); 1396 void kmov(KRegister dst, Address src); 1397 void kmov(KRegister dst, KRegister src); 1398 void kmov(Register dst, KRegister src); 1399 void kmov(KRegister dst, Register src); 1400 1401 using Assembler::movddup; 1402 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1403 1404 using Assembler::vmovddup; 1405 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1406 1407 // AVX Unaligned forms 1408 void vmovdqu(Address dst, XMMRegister src); 1409 void vmovdqu(XMMRegister dst, Address src); 1410 void vmovdqu(XMMRegister dst, XMMRegister src); 1411 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1412 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1413 1414 // AVX512 Unaligned 1415 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); 1416 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len); 1417 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len); 1418 1419 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1420 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1421 1422 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1423 if (dst->encoding() != src->encoding() || mask != k0) { 1424 Assembler::evmovdqub(dst, mask, src, merge, vector_len); 1425 } 1426 } 1427 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1428 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1429 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1430 1431 void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1432 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1433 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1434 1435 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1436 if (dst->encoding() != src->encoding() || mask != k0) { 1437 Assembler::evmovdquw(dst, mask, src, merge, vector_len); 1438 } 1439 } 1440 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1441 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1442 void evmovdquw(XMMRegister dst, KRegister 
mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1443 1444 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1445 if (dst->encoding() != src->encoding()) { 1446 Assembler::evmovdqul(dst, src, vector_len); 1447 } 1448 } 1449 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1450 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1451 1452 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1453 if (dst->encoding() != src->encoding() || mask != k0) { 1454 Assembler::evmovdqul(dst, mask, src, merge, vector_len); 1455 } 1456 } 1457 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1458 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1459 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1460 1461 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 1462 if (dst->encoding() != src->encoding()) { 1463 Assembler::evmovdquq(dst, src, vector_len); 1464 } 1465 } 1466 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1467 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1468 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1469 1470 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1471 if (dst->encoding() != src->encoding() || mask != k0) { 1472 Assembler::evmovdquq(dst, mask, src, merge, vector_len); 1473 } 1474 } 1475 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1476 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1477 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1478 1479 // Move Aligned Double Quadword 1480 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1481 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1482 void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1483 1484 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1485 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1486 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1487 void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1488 1489 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } 1490 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1491 void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1492 1493 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1494 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1495 void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1496 1497 void mulss(XMMRegister dst, XMMRegister src) { 
Assembler::mulss(dst, src); } 1498 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1499 void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1500 1501 // Carry-Less Multiplication Quadword 1502 void pclmulldq(XMMRegister dst, XMMRegister src) { 1503 // 0x00 - multiply lower 64 bits [0:63] 1504 Assembler::pclmulqdq(dst, src, 0x00); 1505 } 1506 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1507 // 0x11 - multiply upper 64 bits [64:127] 1508 Assembler::pclmulqdq(dst, src, 0x11); 1509 } 1510 1511 void pcmpeqb(XMMRegister dst, XMMRegister src); 1512 void pcmpeqw(XMMRegister dst, XMMRegister src); 1513 1514 void pcmpestri(XMMRegister dst, Address src, int imm8); 1515 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1516 1517 void pmovzxbw(XMMRegister dst, XMMRegister src); 1518 void pmovzxbw(XMMRegister dst, Address src); 1519 1520 void pmovmskb(Register dst, XMMRegister src); 1521 1522 void ptest(XMMRegister dst, XMMRegister src); 1523 1524 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1525 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1526 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1527 1528 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1529 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1530 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1531 1532 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1533 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1534 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1535 1536 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1537 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1538 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1539 1540 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1541 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1542 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1543 1544 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1545 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1546 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1547 1548 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1549 void xorpd(XMMRegister dst, XMMRegister src); 1550 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1551 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1552 1553 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1554 void xorps(XMMRegister dst, XMMRegister src); 1555 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1556 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1557 1558 // Shuffle Bytes 1559 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1560 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1561 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1562 // AVX 3-operands instructions 1563 1564 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 
Assembler::vaddsd(dst, nds, src); } 1565 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1566 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1567 1568 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1569 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1570 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1571 1572 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1573 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1574 1575 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1576 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1577 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1578 1579 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1580 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1581 1582 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1583 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1584 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1585 1586 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1587 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1588 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1589 1590 using Assembler::vpbroadcastd; 1591 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1592 1593 using Assembler::vpbroadcastq; 1594 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1595 1596 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1597 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len); 1598 1599 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1600 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1601 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1602 1603 // Vector compares 1604 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1605 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1606 } 1607 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1608 1609 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1610 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1611 } 1612 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, 
AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1613 1614 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1615 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1616 } 1617 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1618 1619 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1620 Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); 1621 } 1622 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1623 1624 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1625 1626 // Emit comparison instruction for the specified comparison predicate. 1627 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1628 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1629 1630 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1631 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1632 1633 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1634 1635 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1636 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1637 1638 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1639 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1640 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1641 1642 void vpmuldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmuldq(dst, nds, src, vector_len); } 1643 1644 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1645 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1646 1647 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1648 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1649 1650 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1651 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1652 1653 void evpsrad(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1654 void evpsrad(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1655 1656 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1657 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1658 1659 using Assembler::evpsllw; 1660 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1661 if (!is_varshift) { 1662 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1663 } else { 1664 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1665 } 1666 } 1667 void evpslld(XMMRegister dst, 
KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1668 if (!is_varshift) { 1669 Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1670 } else { 1671 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1672 } 1673 } 1674 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1675 if (!is_varshift) { 1676 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1677 } else { 1678 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1679 } 1680 } 1681 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1682 if (!is_varshift) { 1683 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1684 } else { 1685 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1686 } 1687 } 1688 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1689 if (!is_varshift) { 1690 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1691 } else { 1692 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1693 } 1694 } 1695 1696 using Assembler::evpsrlq; 1697 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1698 if (!is_varshift) { 1699 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1700 } else { 1701 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1702 } 1703 } 1704 using Assembler::evpsraw; 1705 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1706 if (!is_varshift) { 1707 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1708 } else { 1709 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1710 } 1711 } 1712 using Assembler::evpsrad; 1713 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1714 if (!is_varshift) { 1715 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1716 } else { 1717 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1718 } 1719 } 1720 using Assembler::evpsraq; 1721 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1722 if (!is_varshift) { 1723 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1724 } else { 1725 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1726 } 1727 } 1728 1729 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1730 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1731 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1732 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1733 1734 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1735 void evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1736 void evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1737 void 
evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1738 1739 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1740 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1741 1742 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1743 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1744 1745 void vptest(XMMRegister dst, XMMRegister src); 1746 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1747 1748 void punpcklbw(XMMRegister dst, XMMRegister src); 1749 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1750 1751 void pshufd(XMMRegister dst, Address src, int mode); 1752 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1753 1754 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1755 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1756 1757 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1758 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1759 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1760 1761 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1762 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1763 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1764 1765 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1766 1767 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1768 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1769 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1770 1771 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1772 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1773 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1774 1775 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1776 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1777 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1778 1779 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1780 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1781 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1782 1783 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1784 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1785 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
Register rscratch = noreg); 1786 1787 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1788 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1789 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1790 1791 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1792 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1793 1794 // AVX Vector instructions 1795 1796 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1797 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1798 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1799 1800 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1801 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1802 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1803 1804 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1805 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1806 Assembler::vpxor(dst, nds, src, vector_len); 1807 else 1808 Assembler::vxorpd(dst, nds, src, vector_len); 1809 } 1810 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1811 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1812 Assembler::vpxor(dst, nds, src, vector_len); 1813 else 1814 Assembler::vxorpd(dst, nds, src, vector_len); 1815 } 1816 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1817 1818 // Simple version for AVX2 256bit vectors 1819 void vpxor(XMMRegister dst, XMMRegister src) { 1820 assert(UseAVX >= 2, "Should be at least AVX2"); 1821 Assembler::vpxor(dst, dst, src, AVX_256bit); 1822 } 1823 void vpxor(XMMRegister dst, Address src) { 1824 assert(UseAVX >= 2, "Should be at least AVX2"); 1825 Assembler::vpxor(dst, dst, src, AVX_256bit); 1826 } 1827 1828 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1829 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1830 1831 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1832 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1833 Assembler::vinserti32x4(dst, nds, src, imm8); 1834 } else if (UseAVX > 1) { 1835 // vinserti128 is available only in AVX2 1836 Assembler::vinserti128(dst, nds, src, imm8); 1837 } else { 1838 Assembler::vinsertf128(dst, nds, src, imm8); 1839 } 1840 } 1841 1842 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1843 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1844 Assembler::vinserti32x4(dst, nds, src, imm8); 1845 } else if (UseAVX > 1) { 1846 // vinserti128 is available only in AVX2 1847 Assembler::vinserti128(dst, nds, src, imm8); 1848 } else { 1849 Assembler::vinsertf128(dst, nds, src, imm8); 1850 } 1851 } 1852 1853 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t 
imm8) { 1854 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1855 Assembler::vextracti32x4(dst, src, imm8); 1856 } else if (UseAVX > 1) { 1857 // vextracti128 is available only in AVX2 1858 Assembler::vextracti128(dst, src, imm8); 1859 } else { 1860 Assembler::vextractf128(dst, src, imm8); 1861 } 1862 } 1863 1864 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1865 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1866 Assembler::vextracti32x4(dst, src, imm8); 1867 } else if (UseAVX > 1) { 1868 // vextracti128 is available only in AVX2 1869 Assembler::vextracti128(dst, src, imm8); 1870 } else { 1871 Assembler::vextractf128(dst, src, imm8); 1872 } 1873 } 1874 1875 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1876 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1877 vinserti128(dst, dst, src, 1); 1878 } 1879 void vinserti128_high(XMMRegister dst, Address src) { 1880 vinserti128(dst, dst, src, 1); 1881 } 1882 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1883 vextracti128(dst, src, 1); 1884 } 1885 void vextracti128_high(Address dst, XMMRegister src) { 1886 vextracti128(dst, src, 1); 1887 } 1888 1889 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1890 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1891 Assembler::vinsertf32x4(dst, dst, src, 1); 1892 } else { 1893 Assembler::vinsertf128(dst, dst, src, 1); 1894 } 1895 } 1896 1897 void vinsertf128_high(XMMRegister dst, Address src) { 1898 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1899 Assembler::vinsertf32x4(dst, dst, src, 1); 1900 } else { 1901 Assembler::vinsertf128(dst, dst, src, 1); 1902 } 1903 } 1904 1905 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1906 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1907 Assembler::vextractf32x4(dst, src, 1); 1908 } else { 1909 Assembler::vextractf128(dst, src, 1); 1910 } 1911 } 1912 1913 void vextractf128_high(Address dst, XMMRegister src) { 1914 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1915 Assembler::vextractf32x4(dst, src, 1); 1916 } else { 1917 Assembler::vextractf128(dst, src, 1); 1918 } 1919 } 1920 1921 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1922 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1923 Assembler::vinserti64x4(dst, dst, src, 1); 1924 } 1925 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1926 Assembler::vinsertf64x4(dst, dst, src, 1); 1927 } 1928 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1929 Assembler::vextracti64x4(dst, src, 1); 1930 } 1931 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1932 Assembler::vextractf64x4(dst, src, 1); 1933 } 1934 void vextractf64x4_high(Address dst, XMMRegister src) { 1935 Assembler::vextractf64x4(dst, src, 1); 1936 } 1937 void vinsertf64x4_high(XMMRegister dst, Address src) { 1938 Assembler::vinsertf64x4(dst, dst, src, 1); 1939 } 1940 1941 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1942 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1943 vinserti128(dst, dst, src, 0); 1944 } 1945 void vinserti128_low(XMMRegister dst, Address src) { 1946 vinserti128(dst, dst, src, 0); 1947 } 1948 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1949 vextracti128(dst, src, 0); 1950 } 1951 void vextracti128_low(Address dst, XMMRegister src) { 1952 vextracti128(dst, src, 0); 1953 } 1954 1955 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1956 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 
1957 Assembler::vinsertf32x4(dst, dst, src, 0); 1958 } else { 1959 Assembler::vinsertf128(dst, dst, src, 0); 1960 } 1961 } 1962 1963 void vinsertf128_low(XMMRegister dst, Address src) { 1964 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1965 Assembler::vinsertf32x4(dst, dst, src, 0); 1966 } else { 1967 Assembler::vinsertf128(dst, dst, src, 0); 1968 } 1969 } 1970 1971 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1972 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1973 Assembler::vextractf32x4(dst, src, 0); 1974 } else { 1975 Assembler::vextractf128(dst, src, 0); 1976 } 1977 } 1978 1979 void vextractf128_low(Address dst, XMMRegister src) { 1980 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1981 Assembler::vextractf32x4(dst, src, 0); 1982 } else { 1983 Assembler::vextractf128(dst, src, 0); 1984 } 1985 } 1986 1987 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1988 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1989 Assembler::vinserti64x4(dst, dst, src, 0); 1990 } 1991 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1992 Assembler::vinsertf64x4(dst, dst, src, 0); 1993 } 1994 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1995 Assembler::vextracti64x4(dst, src, 0); 1996 } 1997 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1998 Assembler::vextractf64x4(dst, src, 0); 1999 } 2000 void vextractf64x4_low(Address dst, XMMRegister src) { 2001 Assembler::vextractf64x4(dst, src, 0); 2002 } 2003 void vinsertf64x4_low(XMMRegister dst, Address src) { 2004 Assembler::vinsertf64x4(dst, dst, src, 0); 2005 } 2006 2007 // Carry-Less Multiplication Quadword 2008 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 2009 // 0x00 - multiply lower 64 bits [0:63] 2010 Assembler::vpclmulqdq(dst, nds, src, 0x00); 2011 } 2012 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 2013 // 0x11 - multiply upper 64 bits [64:127] 2014 Assembler::vpclmulqdq(dst, nds, src, 0x11); 2015 } 2016 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 2017 // 0x10 - multiply nds[0:63] and src[64:127] 2018 Assembler::vpclmulqdq(dst, nds, src, 0x10); 2019 } 2020 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 2021 //0x01 - multiply nds[64:127] and src[0:63] 2022 Assembler::vpclmulqdq(dst, nds, src, 0x01); 2023 } 2024 2025 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2026 // 0x00 - multiply lower 64 bits [0:63] 2027 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 2028 } 2029 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2030 // 0x11 - multiply upper 64 bits [64:127] 2031 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 2032 } 2033 2034 // AVX-512 mask operations. 
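  // Illustrative sketch, not from the original header.  The opmask helpers
  // declared below are typically combined with the evpcmp* wrappers declared
  // above: a vector compare writes a KRegister mask, masks are merged with
  // kand/kor/kxor, and kortest/ktest set the flags for a branch.  All register,
  // label and lane-count choices below are placeholders:
  //
  //   __ evpcmpd(k1, k0, xmm0, xmm1, Assembler::eq, /*signed*/ true, Assembler::AVX_512bit);
  //   __ evpcmpd(k2, k0, xmm0, xmm2, Assembler::eq, /*signed*/ true, Assembler::AVX_512bit);
  //   __ kor(T_INT, k1, k1, k2);           // lanes equal to either operand
  //   __ kortest(16, k1, k1);              // 16 int lanes in a 512-bit vector
  //   __ jcc(Assembler::zero, L_no_match); // L_no_match: a Label in the caller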
2035 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 2036 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 2037 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 2038 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 2039 void kortest(uint masklen, KRegister src1, KRegister src2); 2040 void ktest(uint masklen, KRegister src1, KRegister src2); 2041 2042 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2043 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2044 2045 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2046 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2047 2048 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2049 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2050 2051 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 2052 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 2053 2054 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 2055 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 2056 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 2057 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 2058 2059 using Assembler::evpandq; 2060 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2061 2062 using Assembler::evpaddq; 2063 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 2064 2065 using Assembler::evporq; 2066 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2067 2068 using Assembler::vpshufb; 2069 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2070 2071 using Assembler::vpor; 2072 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 2073 2074 using Assembler::vpternlogq; 2075 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 2076 2077 void cmov32( Condition cc, Register dst, Address src); 2078 void cmov32( Condition cc, Register dst, Register src); 2079 2080 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 2081 2082 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2083 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2084 2085 void movoop(Register dst, jobject obj); 2086 void movoop(Address dst, jobject obj, Register rscratch); 2087 2088 
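  // Illustrative aside, not part of the upstream header.  The *ptr helpers in
  // this area expand to the 64-bit instruction under _LP64 and to the 32-bit
  // form otherwise, so shared runtime code can be written once for both word
  // sizes.  A hedged sketch with hypothetical register choices (cmpptr is
  // declared in an earlier portion of this class):
  //
  //   __ movptr(rbx, Address(rsp, 0));        // load a machine word
  //   __ cmpptr(rbx, 0);                      // pointer-sized compare
  //   __ cmovptr(Assembler::zero, rbx, rcx);  // cmovq on LP64, cmov32 otherwise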
void mov_metadata(Register dst, Metadata* obj); 2089 void mov_metadata(Address dst, Metadata* obj, Register rscratch); 2090 2091 void movptr(Register dst, Register src); 2092 void movptr(Register dst, Address src); 2093 void movptr(Register dst, AddressLiteral src); 2094 void movptr(Register dst, ArrayAddress src); 2095 void movptr(Register dst, intptr_t src); 2096 void movptr(Address dst, Register src); 2097 void movptr(Address dst, int32_t imm); 2098 void movptr(Address dst, intptr_t src, Register rscratch); 2099 void movptr(ArrayAddress dst, Register src, Register rscratch); 2100 2101 void movptr(Register dst, RegisterOrConstant src) { 2102 if (src.is_constant()) movptr(dst, src.as_constant()); 2103 else movptr(dst, src.as_register()); 2104 } 2105 2106 2107 // to avoid hiding movl 2108 void mov32(Register dst, AddressLiteral src); 2109 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 2110 2111 // Import other mov() methods from the parent class or else 2112 // they will be hidden by the following overriding declaration. 2113 using Assembler::movdl; 2114 void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 2115 2116 using Assembler::movq; 2117 void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 2118 2119 // Can push value or effective address 2120 void pushptr(AddressLiteral src, Register rscratch); 2121 2122 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 2123 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 2124 2125 void pushoop(jobject obj, Register rscratch); 2126 void pushklass(Metadata* obj, Register rscratch); 2127 2128 // sign extend as need a l to ptr sized element 2129 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 2130 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 2131 2132 2133 public: 2134 // Inline type specific methods 2135 #include "asm/macroAssembler_common.hpp" 2136 2137 int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true); 2138 bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]); 2139 bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 2140 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 2141 RegState reg_state[]); 2142 bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 2143 VMRegPair* from, int from_count, int& from_index, VMReg to, 2144 RegState reg_state[], Register val_array); 2145 int extend_stack_for_inline_args(int args_on_stack); 2146 void remove_frame(int initial_framesize, bool needs_stack_repair); 2147 VMReg spill_reg_for(VMReg reg); 2148 2149 // clear memory of size 'cnt' qwords, starting at 'base'; 2150 // if 'is_large' is set, do not try to produce short loop 2151 void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg); 2152 2153 // clear memory initialization sequence for constant size; 2154 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 2155 2156 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 2157 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 2158 2159 // Fill primitive arrays 2160 void generate_fill(BasicType t, bool aligned, 2161 Register to, Register 
value, Register count, 2162 Register rtmp, XMMRegister xtmp); 2163 2164 void encode_iso_array(Register src, Register dst, Register len, 2165 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2166 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 2167 2168 #ifdef _LP64 2169 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 2170 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 2171 Register y, Register y_idx, Register z, 2172 Register carry, Register product, 2173 Register idx, Register kdx); 2174 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 2175 Register yz_idx, Register idx, 2176 Register carry, Register product, int offset); 2177 void multiply_128_x_128_bmi2_loop(Register y, Register z, 2178 Register carry, Register carry2, 2179 Register idx, Register jdx, 2180 Register yz_idx1, Register yz_idx2, 2181 Register tmp, Register tmp3, Register tmp4); 2182 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 2183 Register yz_idx, Register idx, Register jdx, 2184 Register carry, Register product, 2185 Register carry2); 2186 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 2187 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 2188 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 2189 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2190 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 2191 Register tmp2); 2192 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 2193 Register rdxReg, Register raxReg); 2194 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 2195 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2196 Register tmp3, Register tmp4); 2197 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2198 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2199 2200 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 2201 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2202 Register raxReg); 2203 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 2204 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2205 Register raxReg); 2206 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 2207 Register result, Register tmp1, Register tmp2, 2208 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 2209 #endif 2210 2211 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
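  // Illustrative context, not part of the original header.  The CRC32 helpers
  // below are driven from stubs that receive the current crc value, the buffer
  // address and the length in registers.  A minimal, hypothetical call on a
  // 64-bit target, assuming c_rarg0..c_rarg2 hold (crc, buf, len) and that r11
  // and rscratch1 are free:
  //
  //   __ kernel_crc32(c_rarg0 /*crc*/, c_rarg1 /*buf*/, c_rarg2 /*len*/,
  //                   r11 /*table*/, rscratch1 /*tmp*/);
  //
  // Details such as the pre/post complement of the crc value and the choice of
  // the AVX-512 variant (kernel_crc32_avx512, declared below) are left to the
  // actual stub generators and are not shown here.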
2212 void update_byte_crc32(Register crc, Register val, Register table); 2213 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 2214 2215 2216 #ifdef _LP64 2217 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 2218 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 2219 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 2220 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 2221 #endif // _LP64 2222 2223 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 2224 // Note on a naming convention: 2225 // Prefix w = register only used on a Westmere+ architecture 2226 // Prefix n = register only used on a Nehalem architecture 2227 #ifdef _LP64 2228 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2229 Register tmp1, Register tmp2, Register tmp3); 2230 #else 2231 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2232 Register tmp1, Register tmp2, Register tmp3, 2233 XMMRegister xtmp1, XMMRegister xtmp2); 2234 #endif 2235 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 2236 Register in_out, 2237 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 2238 XMMRegister w_xtmp2, 2239 Register tmp1, 2240 Register n_tmp2, Register n_tmp3); 2241 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 2242 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2243 Register tmp1, Register tmp2, 2244 Register n_tmp3); 2245 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 2246 Register in_out1, Register in_out2, Register in_out3, 2247 Register tmp1, Register tmp2, Register tmp3, 2248 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2249 Register tmp4, Register tmp5, 2250 Register n_tmp6); 2251 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 2252 Register tmp1, Register tmp2, Register tmp3, 2253 Register tmp4, Register tmp5, Register tmp6, 2254 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2255 bool is_pclmulqdq_supported); 2256 // Fold 128-bit data chunk 2257 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2258 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 2259 #ifdef _LP64 2260 // Fold 512-bit data chunk 2261 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2262 #endif // _LP64 2263 // Fold 8-bit data 2264 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2265 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2266 2267 // Compress char[] array to byte[]. 2268 void char_array_compress(Register src, Register dst, Register len, 2269 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2270 XMMRegister tmp4, Register tmp5, Register result, 2271 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2272 2273 // Inflate byte[] array to char[]. 
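  // Illustrative note, not part of the upstream header: char_array_compress
  // above and byte_array_inflate below are the macro-assembler bodies behind
  // the compact-strings intrinsics (java.lang.StringUTF16.compress and
  // java.lang.StringLatin1.inflate).  Both take source, destination and length
  // registers plus XMM (and optional opmask) temporaries; compress reports the
  // outcome through `result`, which the Java-level code uses to detect
  // characters that do not fit in a byte.  A hypothetical expansion, with all
  // register names chosen by the caller:
  //
  //   __ char_array_compress(src, dst, len, xmm1, xmm2, xmm3, xmm4,
  //                          tmp5, result);   // mask1/mask2 default to knoreg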
2274 void byte_array_inflate(Register src, Register dst, Register len, 2275 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 2276 2277 void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 2278 Register length, Register temp, int vec_enc); 2279 2280 void fill64_masked(uint shift, Register dst, int disp, 2281 XMMRegister xmm, KRegister mask, Register length, 2282 Register temp, bool use64byteVector = false); 2283 2284 void fill32_masked(uint shift, Register dst, int disp, 2285 XMMRegister xmm, KRegister mask, Register length, 2286 Register temp); 2287 2288 void fill32(Address dst, XMMRegister xmm); 2289 2290 void fill32(Register dst, int disp, XMMRegister xmm); 2291 2292 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 2293 2294 void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false); 2295 2296 #ifdef _LP64 2297 void convert_f2i(Register dst, XMMRegister src); 2298 void convert_d2i(Register dst, XMMRegister src); 2299 void convert_f2l(Register dst, XMMRegister src); 2300 void convert_d2l(Register dst, XMMRegister src); 2301 void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx); 2302 void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx); 2303 2304 void cache_wb(Address line); 2305 void cache_wbsync(bool is_pre); 2306 2307 #ifdef COMPILER2_OR_JVMCI 2308 void generate_fill_avx3(BasicType type, Register to, Register value, 2309 Register count, Register rtmp, XMMRegister xtmp); 2310 #endif // COMPILER2_OR_JVMCI 2311 #endif // _LP64 2312 2313 void vallones(XMMRegister dst, int vector_len); 2314 2315 void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg); 2316 2317 void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2318 void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow); 2319 2320 #ifdef _LP64 2321 void save_legacy_gprs(); 2322 void restore_legacy_gprs(); 2323 void setcc(Assembler::Condition comparison, Register dst); 2324 #endif 2325 }; 2326 2327 #endif // CPU_X86_MACROASSEMBLER_X86_HPP