/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

class ciInlineKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,        // the entry point
    int     number_of_arguments // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg), then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // markWord tests, kills markWord reg
  void test_markword_is_inline_type(Register markword, Label& is_inline_type);

  // inlineKlass queries, kills temp_reg
  void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
  void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
  void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);

  // Get the default value oop for the given InlineKlass
  void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
  // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
  // get_default_value_oop with extra assertion for empty inline klass
  void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);

  void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
  void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
  void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
  void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
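
  // Illustrative (not part of the original header): a typical way these field-flag tests
  // are combined when a field access has to dispatch on its layout. The label names,
  // the flags register and the scratch register are assumptions for this sketch only.
  //
  //   Label is_flat, has_null_marker;
  //   __ test_field_is_flat(flags, rscratch1, is_flat);                 // flat field: copy payload
  //   __ test_field_has_null_marker(flags, rscratch1, has_null_marker); // nullable flat field
  //   // fall through: regular (non-flat) field access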
  // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
  void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
  void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
  void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);

  // Check array klass layout helper for flat or null-free arrays...
  void test_flat_array_layout(Register lh, Label& is_flat_array);
  void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)) ; }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);
  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress   dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
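
  // Illustrative (not part of the original header): a typical call_VM use from interpreter
  // or stub code. The runtime entry named below is hypothetical; the argument register
  // choice is an assumption for this sketch.
  //
  //   __ call_VM(rax,                                                        // oop result in rax
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry),  // hypothetical entry
  //              rbx);                                                       // one register argument
  //   // pending exceptions are checked after the call unless check_exceptions == false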
  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
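
  // Illustrative (not part of the original header): call_VM_leaf targets LEAF runtime
  // entries (see the "Support for VM calls" note above), so there is no oop result and
  // no pending-exception check. The runtime function below is hypothetical.
  //
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::some_leaf_helper),  // hypothetical
  //                   rdi, rsi);   // up to four register arguments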
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_metadata(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);
  void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
  // We probably need the following for arrays: TODO FIXME
  // void flat_element_copy(DecoratorSet decorators, Register src, Register dst, Register array);

  // inline type data payload offsets...
  void first_field_offset(Register inline_klass, Register offset);
  void data_for_oop(Register oop, Register data, Register inline_klass);
  // get data payload ptr of a flat value array at index, kills rcx and index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
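
  // Illustrative (not part of the original header): load_heap_oop/store_heap_oop go through
  // the GC barrier set, so they are preferred over raw moves for oop fields. The field
  // offset below is a placeholder for this sketch.
  //
  //   __ load_heap_oop(rax, Address(rbx, some_field_offset));   // read, with any needed barrier
  //   __ store_heap_oop(Address(rbx, some_field_offset), rcx);  // write, with any needed barrier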
  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
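
  // Illustrative (not part of the original header): an arithmetic shift alone rounds
  // towards negative infinity, so a correction is needed for negative dividends to get
  // Java's round-towards-zero semantics. For example, for -7 / 4:
  //   -7 >> 2 == -2 (rounds down), whereas Java requires -7 / 4 == -1,
  // hence this dedicated helper rather than a plain sar.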
#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - always save/restore FPU state as a whole
  // - do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation

  // Object / value buffer allocation...
  // Allocate instance of klass, assumes klass initialized by caller
  // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
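
  // Illustrative (not part of the original header): the typical fast-path/slow-path shape
  // around tlab_allocate. Register choices and the instance size are assumptions for this
  // sketch; on failure control continues at slow_case (e.g. a runtime allocation call).
  //
  //   Label slow_case, done;
  //   __ tlab_allocate(r15_thread, rax, noreg, instance_size_in_bytes, rbx, rcx, slow_case);
  //   // ... initialize header/fields of the new object in rax ...
  //   __ jmp(done);
  //   __ bind(slow_case);
  //   // ... call into the VM to allocate ...
  //   __ bind(done);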
  // For field "index" within "klass", return inline_klass ...
  void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);

  void inline_layout_info(Register klass, Register index, Register layout_info);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table(Register sub_klass,
                                     Register super_klass,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3,
                                     Register temp4,
                                     Register result,
                                     u1 super_klass_slot);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);
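
  // Illustrative (not part of the original header): a fast path and its matching slow path
  // are meant to be wired together; passing nullptr for a label means "take the
  // fall-through" for that outcome. Registers and labels below are assumptions for this sketch.
  //
  //   Label L_ok, L_bad;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_bad, nullptr); // nullptr: fall into slow path
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, rdx, &L_ok, &L_bad);
  //   __ bind(L_bad);   // definitely not a subtype
  //   // ...
  //   __ bind(L_ok);    // definitely a subtype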
  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.
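
  // Illustrative (not part of the original header): the *ptr variants below pick the
  // 32-bit or 64-bit form automatically, and the AddressLiteral overloads accept addresses
  // that may not be directly reachable, using rscratch when needed. The counter symbol is
  // a placeholder for this sketch.
  //
  //   __ addptr(rsp, 2 * wordSize);                                        // addl on 32-bit, addq on 64-bit
  //   __ cmpptr(rax, Address(rsp, 0));
  //   __ cmp32(rcx, ExternalAddress((address)&some_counter), rscratch1);   // hypothetical counter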
  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
  void andptr(Register dst, Address src)  { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);
  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
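
  // Illustrative (not part of the original header): cond_inc32 bumps a counter only when
  // the given condition currently holds, without disturbing the flags for the code that
  // follows. The counter symbol and label are assumptions for this sketch.
  //
  //   __ testptr(rax, rax);
  //   __ cond_inc32(Assembler::zero, ExternalAddress((address)&null_seen_count), rscratch1); // hypothetical counter
  //   __ jcc(Assembler::zero, L_is_null);   // flags from the testptr are still valid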
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
  void lea(Register dst, AddressLiteral adr);
  void lea(Address  dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
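
  // Illustrative (not part of the original header): jump(AddressLiteral) branches TO the
  // literal address itself, whereas jump(ArrayAddress) below loads the target out of the
  // described memory location first. The stub entry shown is just one possible target.
  //
  //   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));   // goes to the entry itself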
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src)        { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */    /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */     /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);
  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
XMMRegister src) { Assembler::movss(dst, src); } 1262 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 1263 void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1264 1265 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 1266 void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1267 1268 public: 1269 1270 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 1271 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 1272 void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1273 1274 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 1275 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 1276 void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1277 1278 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } 1279 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } 1280 void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1281 1282 using Assembler::vbroadcastsd; 1283 void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1284 1285 using Assembler::vbroadcastss; 1286 void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1287 1288 // Vector float blend 1289 void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1290 void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg); 1291 1292 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 1293 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 1294 void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1295 1296 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 1297 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 1298 void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1299 1300 // Move Unaligned Double Quadword 1301 void movdqu(Address dst, XMMRegister src); 1302 void movdqu(XMMRegister dst, XMMRegister src); 1303 void movdqu(XMMRegister dst, Address src); 1304 void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1305 1306 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } 1307 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } 1308 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } 1309 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } 1310 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } 1311 void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1312 1313 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } 1314 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } 1315 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } 1316 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } 1317 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } 1318 void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg); 1319 1320 // Safe move operation, lowers down to 16bit 
moves for targets supporting 1321 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. 1322 void kmov(Address dst, KRegister src); 1323 void kmov(KRegister dst, Address src); 1324 void kmov(KRegister dst, KRegister src); 1325 void kmov(Register dst, KRegister src); 1326 void kmov(KRegister dst, Register src); 1327 1328 using Assembler::movddup; 1329 void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1330 1331 using Assembler::vmovddup; 1332 void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1333 1334 // AVX Unaligned forms 1335 void vmovdqu(Address dst, XMMRegister src); 1336 void vmovdqu(XMMRegister dst, Address src); 1337 void vmovdqu(XMMRegister dst, XMMRegister src); 1338 void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1339 void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1340 1341 // AVX512 Unaligned 1342 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len); 1343 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len); 1344 1345 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1346 void evmovdqub(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); } 1347 1348 void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1349 if (dst->encoding() != src->encoding() || mask != k0) { 1350 Assembler::evmovdqub(dst, mask, src, merge, vector_len); 1351 } 1352 } 1353 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1354 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1355 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1356 1357 void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1358 void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); } 1359 1360 void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1361 if (dst->encoding() != src->encoding() || mask != k0) { 1362 Assembler::evmovdquw(dst, mask, src, merge, vector_len); 1363 } 1364 } 1365 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1366 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1367 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1368 1369 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1370 if (dst->encoding() != src->encoding()) { 1371 Assembler::evmovdqul(dst, src, vector_len); 1372 } 1373 } 1374 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1375 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1376 1377 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int 
                  vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
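  // Illustrative sketch (not emitted by this header): the CRC32 folding helpers
  // declared further below (fold_128bit_crc32) typically pair the two selector
  // wrappers above to fold a 128-bit chunk of input into the running CRC.
  // The register names and 'offset' used here are placeholders for this sketch only:
  //
  //   movdqa(xtmp, xcrc);                    // copy the running CRC
  //   pclmulhdq(xtmp, xK);                   // xtmp = xcrc[127:64] * K[127:64]
  //   pclmulldq(xcrc, xK);                   // xcrc = xcrc[63:0]   * K[63:0]
  //   pxor(xcrc, xtmp);                      // combine the two partial products
  //   movdqu(xtmp, Address(buf, offset));    // load the next 128 bits of input
  //   pxor(xcrc, xtmp);                      // xor them into the folded value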
void pcmpeqw(XMMRegister dst, XMMRegister src); 1438 1439 void pcmpestri(XMMRegister dst, Address src, int imm8); 1440 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1441 1442 void pmovzxbw(XMMRegister dst, XMMRegister src); 1443 void pmovzxbw(XMMRegister dst, Address src); 1444 1445 void pmovmskb(Register dst, XMMRegister src); 1446 1447 void ptest(XMMRegister dst, XMMRegister src); 1448 1449 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1450 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1451 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg); 1452 1453 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1454 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1455 void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1456 1457 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1458 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1459 void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1460 1461 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1462 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1463 void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1464 1465 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1466 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1467 void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1468 1469 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1470 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1471 void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1472 1473 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1474 void xorpd(XMMRegister dst, XMMRegister src); 1475 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1476 void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1477 1478 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1479 void xorps(XMMRegister dst, XMMRegister src); 1480 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1481 void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1482 1483 // Shuffle Bytes 1484 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1485 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1486 void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg); 1487 // AVX 3-operands instructions 1488 1489 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1490 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1491 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1492 1493 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1494 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1495 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1496 1497 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister 
src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1498 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg); 1499 1500 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1501 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1502 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1503 1504 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1505 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1506 1507 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1508 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1509 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1510 1511 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1512 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1513 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1514 1515 using Assembler::vpbroadcastd; 1516 void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1517 1518 using Assembler::vpbroadcastq; 1519 void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg); 1520 1521 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1522 void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len); 1523 1524 void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1525 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1526 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1527 1528 // Vector compares 1529 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1530 Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); 1531 } 1532 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1533 1534 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1535 Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); 1536 } 1537 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1538 1539 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1540 Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); 1541 } 1542 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1543 1544 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) { 1545 
Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); 1546 } 1547 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg); 1548 1549 void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len); 1550 1551 // Emit comparison instruction for the specified comparison predicate. 1552 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len); 1553 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1554 1555 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1556 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1557 1558 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1559 1560 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1561 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1562 1563 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1564 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); } 1565 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1566 1567 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1568 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1569 1570 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1571 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1572 1573 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1574 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1575 1576 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1577 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1578 1579 void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1580 if (!is_varshift) { 1581 Assembler::evpsllw(dst, mask, nds, src, merge, vector_len); 1582 } else { 1583 Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len); 1584 } 1585 } 1586 void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1587 if (!is_varshift) { 1588 Assembler::evpslld(dst, mask, nds, src, merge, vector_len); 1589 } else { 1590 Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len); 1591 } 1592 } 1593 void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1594 if (!is_varshift) { 1595 Assembler::evpsllq(dst, mask, nds, src, merge, vector_len); 1596 } else { 1597 Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len); 1598 } 1599 } 1600 void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1601 if (!is_varshift) { 1602 Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len); 1603 } else { 1604 Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len); 1605 } 1606 } 1607 void evpsrld(XMMRegister dst, KRegister mask, XMMRegister 
nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1608 if (!is_varshift) { 1609 Assembler::evpsrld(dst, mask, nds, src, merge, vector_len); 1610 } else { 1611 Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len); 1612 } 1613 } 1614 1615 using Assembler::evpsrlq; 1616 void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1617 if (!is_varshift) { 1618 Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len); 1619 } else { 1620 Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len); 1621 } 1622 } 1623 void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1624 if (!is_varshift) { 1625 Assembler::evpsraw(dst, mask, nds, src, merge, vector_len); 1626 } else { 1627 Assembler::evpsravw(dst, mask, nds, src, merge, vector_len); 1628 } 1629 } 1630 void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1631 if (!is_varshift) { 1632 Assembler::evpsrad(dst, mask, nds, src, merge, vector_len); 1633 } else { 1634 Assembler::evpsravd(dst, mask, nds, src, merge, vector_len); 1635 } 1636 } 1637 using Assembler::evpsraq; 1638 void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) { 1639 if (!is_varshift) { 1640 Assembler::evpsraq(dst, mask, nds, src, merge, vector_len); 1641 } else { 1642 Assembler::evpsravq(dst, mask, nds, src, merge, vector_len); 1643 } 1644 } 1645 1646 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1647 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1648 void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1649 void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1650 1651 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1652 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1653 1654 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1655 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1656 1657 void vptest(XMMRegister dst, XMMRegister src); 1658 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1659 1660 void punpcklbw(XMMRegister dst, XMMRegister src); 1661 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1662 1663 void pshufd(XMMRegister dst, Address src, int mode); 1664 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1665 1666 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1667 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1668 1669 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1670 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1671 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1672 1673 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int 
vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1674 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1675 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1676 1677 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg); 1678 1679 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1680 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1681 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1682 1683 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1684 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1685 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1686 1687 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1688 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1689 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1690 1691 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1692 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1693 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1694 1695 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1696 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1697 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1698 1699 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1700 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1701 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1702 1703 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1704 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg); 1705 1706 // AVX Vector instructions 1707 1708 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1709 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1710 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1711 1712 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1713 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1714 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1715 1716 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1717 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1718 Assembler::vpxor(dst, nds, src, 
vector_len); 1719 else 1720 Assembler::vxorpd(dst, nds, src, vector_len); 1721 } 1722 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1723 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1724 Assembler::vpxor(dst, nds, src, vector_len); 1725 else 1726 Assembler::vxorpd(dst, nds, src, vector_len); 1727 } 1728 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1729 1730 // Simple version for AVX2 256bit vectors 1731 void vpxor(XMMRegister dst, XMMRegister src) { 1732 assert(UseAVX >= 2, "Should be at least AVX2"); 1733 Assembler::vpxor(dst, dst, src, AVX_256bit); 1734 } 1735 void vpxor(XMMRegister dst, Address src) { 1736 assert(UseAVX >= 2, "Should be at least AVX2"); 1737 Assembler::vpxor(dst, dst, src, AVX_256bit); 1738 } 1739 1740 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1741 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1742 1743 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1744 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1745 Assembler::vinserti32x4(dst, nds, src, imm8); 1746 } else if (UseAVX > 1) { 1747 // vinserti128 is available only in AVX2 1748 Assembler::vinserti128(dst, nds, src, imm8); 1749 } else { 1750 Assembler::vinsertf128(dst, nds, src, imm8); 1751 } 1752 } 1753 1754 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1755 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1756 Assembler::vinserti32x4(dst, nds, src, imm8); 1757 } else if (UseAVX > 1) { 1758 // vinserti128 is available only in AVX2 1759 Assembler::vinserti128(dst, nds, src, imm8); 1760 } else { 1761 Assembler::vinsertf128(dst, nds, src, imm8); 1762 } 1763 } 1764 1765 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1766 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1767 Assembler::vextracti32x4(dst, src, imm8); 1768 } else if (UseAVX > 1) { 1769 // vextracti128 is available only in AVX2 1770 Assembler::vextracti128(dst, src, imm8); 1771 } else { 1772 Assembler::vextractf128(dst, src, imm8); 1773 } 1774 } 1775 1776 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1777 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1778 Assembler::vextracti32x4(dst, src, imm8); 1779 } else if (UseAVX > 1) { 1780 // vextracti128 is available only in AVX2 1781 Assembler::vextracti128(dst, src, imm8); 1782 } else { 1783 Assembler::vextractf128(dst, src, imm8); 1784 } 1785 } 1786 1787 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1788 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1789 vinserti128(dst, dst, src, 1); 1790 } 1791 void vinserti128_high(XMMRegister dst, Address src) { 1792 vinserti128(dst, dst, src, 1); 1793 } 1794 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1795 vextracti128(dst, src, 1); 1796 } 1797 void vextracti128_high(Address dst, XMMRegister src) { 1798 vextracti128(dst, src, 1); 1799 } 1800 1801 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1802 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1803 Assembler::vinsertf32x4(dst, dst, src, 1); 1804 } else { 1805 Assembler::vinsertf128(dst, dst, src, 1); 1806 } 1807 } 1808 1809 void vinsertf128_high(XMMRegister dst, Address src) { 1810 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1811 
Assembler::vinsertf32x4(dst, dst, src, 1); 1812 } else { 1813 Assembler::vinsertf128(dst, dst, src, 1); 1814 } 1815 } 1816 1817 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1818 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1819 Assembler::vextractf32x4(dst, src, 1); 1820 } else { 1821 Assembler::vextractf128(dst, src, 1); 1822 } 1823 } 1824 1825 void vextractf128_high(Address dst, XMMRegister src) { 1826 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1827 Assembler::vextractf32x4(dst, src, 1); 1828 } else { 1829 Assembler::vextractf128(dst, src, 1); 1830 } 1831 } 1832 1833 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1834 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1835 Assembler::vinserti64x4(dst, dst, src, 1); 1836 } 1837 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1838 Assembler::vinsertf64x4(dst, dst, src, 1); 1839 } 1840 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1841 Assembler::vextracti64x4(dst, src, 1); 1842 } 1843 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1844 Assembler::vextractf64x4(dst, src, 1); 1845 } 1846 void vextractf64x4_high(Address dst, XMMRegister src) { 1847 Assembler::vextractf64x4(dst, src, 1); 1848 } 1849 void vinsertf64x4_high(XMMRegister dst, Address src) { 1850 Assembler::vinsertf64x4(dst, dst, src, 1); 1851 } 1852 1853 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1854 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1855 vinserti128(dst, dst, src, 0); 1856 } 1857 void vinserti128_low(XMMRegister dst, Address src) { 1858 vinserti128(dst, dst, src, 0); 1859 } 1860 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1861 vextracti128(dst, src, 0); 1862 } 1863 void vextracti128_low(Address dst, XMMRegister src) { 1864 vextracti128(dst, src, 0); 1865 } 1866 1867 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1868 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1869 Assembler::vinsertf32x4(dst, dst, src, 0); 1870 } else { 1871 Assembler::vinsertf128(dst, dst, src, 0); 1872 } 1873 } 1874 1875 void vinsertf128_low(XMMRegister dst, Address src) { 1876 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1877 Assembler::vinsertf32x4(dst, dst, src, 0); 1878 } else { 1879 Assembler::vinsertf128(dst, dst, src, 0); 1880 } 1881 } 1882 1883 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1884 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1885 Assembler::vextractf32x4(dst, src, 0); 1886 } else { 1887 Assembler::vextractf128(dst, src, 0); 1888 } 1889 } 1890 1891 void vextractf128_low(Address dst, XMMRegister src) { 1892 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1893 Assembler::vextractf32x4(dst, src, 0); 1894 } else { 1895 Assembler::vextractf128(dst, src, 0); 1896 } 1897 } 1898 1899 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1900 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1901 Assembler::vinserti64x4(dst, dst, src, 0); 1902 } 1903 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1904 Assembler::vinsertf64x4(dst, dst, src, 0); 1905 } 1906 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1907 Assembler::vextracti64x4(dst, src, 0); 1908 } 1909 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1910 Assembler::vextractf64x4(dst, src, 0); 1911 } 1912 void vextractf64x4_low(Address dst, XMMRegister src) { 1913 Assembler::vextractf64x4(dst, src, 0); 1914 } 1915 void vinsertf64x4_low(XMMRegister dst, Address src) 
{ 1916 Assembler::vinsertf64x4(dst, dst, src, 0); 1917 } 1918 1919 // Carry-Less Multiplication Quadword 1920 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1921 // 0x00 - multiply lower 64 bits [0:63] 1922 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1923 } 1924 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1925 // 0x11 - multiply upper 64 bits [64:127] 1926 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1927 } 1928 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1929 // 0x10 - multiply nds[0:63] and src[64:127] 1930 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1931 } 1932 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1933 //0x01 - multiply nds[64:127] and src[0:63] 1934 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1935 } 1936 1937 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1938 // 0x00 - multiply lower 64 bits [0:63] 1939 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1940 } 1941 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1942 // 0x11 - multiply upper 64 bits [64:127] 1943 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1944 } 1945 1946 // AVX-512 mask operations. 1947 void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2); 1948 void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1949 void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg); 1950 void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2); 1951 void kortest(uint masklen, KRegister src1, KRegister src2); 1952 void ktest(uint masklen, KRegister src1, KRegister src2); 1953 1954 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1955 void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1956 1957 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1958 void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1959 1960 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1961 void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1962 1963 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len); 1964 void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len); 1965 1966 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1967 void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1968 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc); 1969 void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc); 1970 1971 using Assembler::evpandq; 1972 void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1973 1974 using Assembler::evpaddq; 1975 void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral 
src, bool merge, int vector_len, Register rscratch = noreg); 1976 1977 using Assembler::evporq; 1978 void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1979 1980 using Assembler::vpshufb; 1981 void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1982 1983 using Assembler::vpor; 1984 void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg); 1985 1986 using Assembler::vpternlogq; 1987 void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg); 1988 1989 void cmov32( Condition cc, Register dst, Address src); 1990 void cmov32( Condition cc, Register dst, Register src); 1991 1992 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1993 1994 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1995 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1996 1997 void movoop(Register dst, jobject obj); 1998 void movoop(Address dst, jobject obj, Register rscratch); 1999 2000 void mov_metadata(Register dst, Metadata* obj); 2001 void mov_metadata(Address dst, Metadata* obj, Register rscratch); 2002 2003 void movptr(Register dst, Register src); 2004 void movptr(Register dst, Address src); 2005 void movptr(Register dst, AddressLiteral src); 2006 void movptr(Register dst, ArrayAddress src); 2007 void movptr(Register dst, intptr_t src); 2008 void movptr(Address dst, Register src); 2009 void movptr(Address dst, int32_t imm); 2010 void movptr(Address dst, intptr_t src, Register rscratch); 2011 void movptr(ArrayAddress dst, Register src, Register rscratch); 2012 2013 void movptr(Register dst, RegisterOrConstant src) { 2014 if (src.is_constant()) movptr(dst, src.as_constant()); 2015 else movptr(dst, src.as_register()); 2016 } 2017 2018 2019 // to avoid hiding movl 2020 void mov32(Register dst, AddressLiteral src); 2021 void mov32(AddressLiteral dst, Register src, Register rscratch = noreg); 2022 2023 // Import other mov() methods from the parent class or else 2024 // they will be hidden by the following overriding declaration. 
  using Assembler::movdl;
  void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::movq;
  void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Can push value or effective address
  void pushptr(AddressLiteral src, Register rscratch);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj, Register rscratch);
  void pushklass(Metadata* obj, Register rscratch);

  // sign-extend a 32-bit (l) value to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

 public:
  // Inline type specific methods
#include "asm/macroAssembler_common.hpp"

  int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
  bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
  bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                            VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                            RegState reg_state[]);
  bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                          VMRegPair* from, int from_count, int& from_index, VMReg to,
                          RegState reg_state[], Register val_array);
  int extend_stack_for_inline_args(int args_on_stack);
  void remove_frame(int initial_framesize, bool needs_stack_repair);
  VMReg spill_reg_for(VMReg reg);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);

  // clear memory initialization sequence for a constant size
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx,
Register idx, Register jdx, 2096 Register carry, Register product, 2097 Register carry2); 2098 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 2099 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 2100 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 2101 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2102 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 2103 Register tmp2); 2104 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 2105 Register rdxReg, Register raxReg); 2106 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 2107 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2108 Register tmp3, Register tmp4); 2109 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 2110 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 2111 2112 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 2113 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2114 Register raxReg); 2115 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 2116 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 2117 Register raxReg); 2118 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 2119 Register result, Register tmp1, Register tmp2, 2120 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 2121 #endif 2122 2123 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
2124 void update_byte_crc32(Register crc, Register val, Register table); 2125 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 2126 2127 2128 #ifdef _LP64 2129 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 2130 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 2131 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 2132 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 2133 #endif // _LP64 2134 2135 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 2136 // Note on a naming convention: 2137 // Prefix w = register only used on a Westmere+ architecture 2138 // Prefix n = register only used on a Nehalem architecture 2139 #ifdef _LP64 2140 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2141 Register tmp1, Register tmp2, Register tmp3); 2142 #else 2143 void crc32c_ipl_alg4(Register in_out, uint32_t n, 2144 Register tmp1, Register tmp2, Register tmp3, 2145 XMMRegister xtmp1, XMMRegister xtmp2); 2146 #endif 2147 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 2148 Register in_out, 2149 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 2150 XMMRegister w_xtmp2, 2151 Register tmp1, 2152 Register n_tmp2, Register n_tmp3); 2153 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 2154 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2155 Register tmp1, Register tmp2, 2156 Register n_tmp3); 2157 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 2158 Register in_out1, Register in_out2, Register in_out3, 2159 Register tmp1, Register tmp2, Register tmp3, 2160 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2161 Register tmp4, Register tmp5, 2162 Register n_tmp6); 2163 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 2164 Register tmp1, Register tmp2, Register tmp3, 2165 Register tmp4, Register tmp5, Register tmp6, 2166 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 2167 bool is_pclmulqdq_supported); 2168 // Fold 128-bit data chunk 2169 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 2170 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 2171 #ifdef _LP64 2172 // Fold 512-bit data chunk 2173 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 2174 #endif // _LP64 2175 // Fold 8-bit data 2176 void fold_8bit_crc32(Register crc, Register table, Register tmp); 2177 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 2178 2179 // Compress char[] array to byte[]. 2180 void char_array_compress(Register src, Register dst, Register len, 2181 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 2182 XMMRegister tmp4, Register tmp5, Register result, 2183 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 2184 2185 // Inflate byte[] array to char[]. 
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI
#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);

  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

  void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);

#ifdef _LP64
  void save_legacy_gprs();
  void restore_legacy_gprs();
  void setcc(Assembler::Condition comparison, Register dst);
#endif
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits assembly code that jumps around any code
 * generated between the creation of the instance and its automatic
 * destruction at the end of the enclosing scope. Whether the code is skipped
 * depends on the run-time value of the flag passed to the constructor.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
  ~SkipIfEqual();
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP
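// Usage sketch (illustrative only): SkipIfEqual is intended to be used as a
// scoped guard inside code-generation routines. The flag address and the
// scratch register shown here are placeholders, not part of this interface:
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false, rscratch1);
//     // ... code emitted here is jumped over at run time when
//     //     *flag_addr == false ...
//   } // ~SkipIfEqual binds the skip-target label here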