/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"

class ciInlineKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is
  // specified (noreg), then rsp will be used instead.
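  //
  // Illustrative sketch (not from the original sources): a typical direct use,
  // assuming a VM entry point 'entry' and one argument already set up, with the
  // oop result wanted in rax:
  //
  //   call_VM_base(rax,     // oop_result
  //                noreg,   // java_thread: compute it here (rdi fallback)
  //                noreg,   // last_java_sp: default to rsp
  //                entry,   // address of the VM function
  //                1,       // one argument (w/o thread) to pop after the call
  //                true);   // check for pending exceptions on return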
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr, Register rscratch);

  // Support for null-checks
  //
  // Generates code that causes a null OS exception if the content of reg is null.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // markWord tests, kills markWord reg
  void test_markword_is_inline_type(Register markword, Label& is_inline_type);

  // inlineKlass queries, kills temp_reg
  void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
  void test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type);
  void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);

  // Get the default value oop for the given InlineKlass
  void get_default_value_oop(Register inline_klass, Register temp_reg, Register obj);
  // The empty value oop, for the given InlineKlass ("empty" as in no instance fields)
  // get_default_value_oop with extra assertion for empty inline klass
  void get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj);

  void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
  void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
  void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
  void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
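
  // Illustrative sketch (not from the original sources): branching on the field
  // flags with the helpers above, assuming 'flags' holds the resolved field flags
  // and a scratch register is free to clobber:
  //
  //   Label is_flat, done;
  //   test_field_is_flat(flags, rscratch1, is_flat);
  //   ... non-flat field access ...
  //   jmp(done);
  //   bind(is_flat);
  //   ... flat field access ...
  //   bind(done);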

  // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
  void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
  void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
  void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);

  // Check array klass layout helper for flat or null-free arrays...
  void test_flat_array_layout(Register lh, Label& is_flat_array);
  void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = checked_cast<int>(target - (address) &disp[1]);
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == nullptr ? "<null>" : file, line);
      *disp = (char)imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = checked_cast<int>(target - (address) &disp[1]);
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on the particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on the particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)); }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)); }
  void increment(Address dst, int value = 1)  { LP64_ONLY(incrementq(dst, value)) NOT_LP64(incrementl(dst, value)); }
  void decrement(Address dst, int value = 1)  { LP64_ONLY(decrementq(dst, value)) NOT_LP64(decrementl(dst, value)); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  void incrementl(AddressLiteral dst, Register rscratch = noreg);
  void incrementl(ArrayAddress dst, Register rscratch);

  void incrementq(AddressLiteral dst, Register rscratch = noreg);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void flt_to_flt16(Register dst, XMMRegister src, XMMRegister tmp) {
    // Use a separate tmp XMM register because the caller may
    // require the src XMM register to be unchanged (as in x86.ad).
    vcvtps2ph(tmp, src, 0x04, Assembler::AVX_128bit);
    movdl(dst, tmp);
    movswl(dst, dst);
  }

  void flt16_to_flt(XMMRegister dst, Register src) {
    movdl(dst, src);
    vcvtph2ps(dst, dst, Assembler::AVX_128bit);
  }

  // Alignment
  void align32();
  void align64();
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);

#ifdef _LP64
  // Support for argument shuffling

  // bias in bytes
  void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
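  //
  // Illustrative sketch (not from the original sources): a call into the runtime
  // through the macros declared below, assuming a hypothetical VM entry
  // InterpreterRuntime::foo that takes one oop argument (in rbx here) and
  // returns an oop:
  //
  //   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::foo), rbx);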

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register rscratch);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jobject_tag(Register possibly_non_local);
  void resolve_jobject(Register value, Register thread, Register tmp);
  void resolve_global_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_metadata(Register dst, Register src);
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
                       Register tmp1, Register tmp2, Register tmp3);

  void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register inline_klass);

  // inline type data payload offsets...
  void first_field_offset(Register inline_klass, Register offset);
  void data_for_oop(Register oop, Register data, Register inline_klass);
  // get the data payload ptr of a flat value array at index, kills rcx and index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
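
  // Illustrative sketch (not from the original sources): a GC-aware oop load and
  // store through the accessors above, with 'off' a made-up field offset and the
  // scratch registers assumed free to clobber:
  //
  //   load_heap_oop(rax, Address(rsi, off));                  // decompresses/decorates as needed
  //   store_heap_oop(Address(rsi, off), rax, rcx, rdx, rbx);  // applies the GC barriers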

  // Used for storing null. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like null) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if the heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  //   CF (corresponds to C0) if x < y
  //   PF (corresponds to C2) if unordered
  //   ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);
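
  // Illustrative sketch (not from the original sources): round-tripping a double
  // through the UseSSE-dependent location (xmm0 when UseSSE >= 2, FPU stack otherwise):
  //
  //   load_double(Address(rsp, 0));    // value now in xmm0 or ST0
  //   ... use/modify the value ...
  //   store_double(Address(rsp, 0));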

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  void push_cont_fastpath();
  void pop_cont_fastpath();

  void inc_held_monitor_count();
  void dec_held_monitor_count();

  DEBUG_ONLY(void stop_if_in_cont(Register cont_reg, const char* name);)

  // Round up to a power of two
  void round_to(Register reg, int modulus);

 private:
  // General purpose and XMM registers potentially clobbered by native code; there
  // is no need for FPU or AVX opmask related methods because C1/interpreter
  // - we always save/restore FPU state as a whole
  // - we do not care about AVX-512 opmask
  static RegSet call_clobbered_gp_registers();
  static XMMRegSet call_clobbered_xmm_registers();

  void push_set(XMMRegSet set, int offset);
  void pop_set(XMMRegSet set, int offset);

 public:
  void push_set(RegSet set, int offset = -1);
  void pop_set(RegSet set, int offset = -1);

  // Push and pop everything that might be clobbered by a native
  // runtime call.
  // Only save the lower 64 bits of each vector register.
  // Additional registers can be excluded in a passed RegSet.
  void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
  void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);

  void push_call_clobbered_registers(bool save_fpu = true) {
    push_call_clobbered_registers_except(RegSet(), save_fpu);
  }
  void pop_call_clobbered_registers(bool restore_fpu = true) {
    pop_call_clobbered_registers_except(RegSet(), restore_fpu);
  }

  // allocation

  // Object / value buffer allocation...
  // Allocate an instance of klass; assumes klass has been initialized by the caller.
  // new_obj prefers to be rax.
  // Kills t1 and t2, preserves klass, returns allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
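
  // Illustrative sketch (not from the original sources): a fixed-size TLAB
  // fast-path allocation falling back to 'slow' when the TLAB cannot satisfy it,
  // with 'instance_size' standing in for a compile-time constant size:
  //
  //   Label slow;
  //   tlab_allocate(r15_thread, rax, noreg, instance_size, rcx, rdx, slow);
  //   ... rax now points to the uninitialized object ...
  //   bind(slow);
  //   ... call into the runtime ...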

  // For field "index" within "klass", return inline_klass ...
  void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);

  void population_count(Register dst, Register src, Register scratch1, Register scratch2);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  void lookup_interface_method_stub(Register recv_klass,
                                    Register holder_klass,
                                    Register resolved_klass,
                                    Register method_result,
                                    Register scan_temp,
                                    Register temp_reg2,
                                    Register receiver,
                                    int itable_index,
                                    Label& L_no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be null, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
  void hashed_check_klass_subtype_slow_path(Register sub_klass,
                                            Register super_klass,
                                            Register temp_reg,
                                            Register temp2_reg,
                                            Label* L_success,
                                            Label* L_failure,
                                            bool set_cond_codes = false);

  // As above, but with a constant super_klass.
  // The result is in Register result, not the condition codes.
  void lookup_secondary_supers_table(Register sub_klass,
                                     Register super_klass,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3,
                                     Register temp4,
                                     Register result,
                                     u1 super_klass_slot);

  void lookup_secondary_supers_table_slow_path(Register r_super_klass,
                                               Register r_array_base,
                                               Register r_array_index,
                                               Register r_bitmap,
                                               Register temp1,
                                               Register temp2,
                                               Label* L_success,
                                               Label* L_failure = nullptr);

  void verify_secondary_supers_table(Register r_sub_klass,
                                     Register r_super_klass,
                                     Register expected,
                                     Register temp1,
                                     Register temp2,
                                     Register temp3);

  void repne_scanq(Register addr, Register value, Register count, Register limit,
                   Label* L_success,
                   Label* L_failure = nullptr);
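
  // Illustrative sketch (not from the original sources): wiring the fast and slow
  // subtype-check paths together, with sub/super klasses assumed in rsi/rax:
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(rsi, rax, rcx, &L_success, &L_failure,
  //                                 nullptr /* fall through to the slow path */);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_success, &L_failure);
  //   bind(L_failure);
  //   ...
  //   bind(L_success);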

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = nullptr,
                      Label* L_slow_path = nullptr);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni(Register rscratch);

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  static Condition negate_condition(Condition cond);
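
  // Illustrative sketch (not from the original sources): a return-path safepoint
  // poll using safepoint_poll above:
  //
  //   Label slow_path;
  //   safepoint_poll(slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
  //   ret(0);
  //   bind(slow_path);
  //   ... jump to the safepoint handler ...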
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)); }
  void andptr(Register dst, Address src)  { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)); }

#ifdef _LP64
  using Assembler::andq;
  void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
#endif

  void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj, Register rscratch);
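
  // Illustrative sketch (not from the original sources): the *ptr helpers below
  // pick the 32-bit or 64-bit form at compile time, so one sequence serves both
  // targets:
  //
  //   addptr(rsp, 2 * wordSize);   // addq on LP64, addl otherwise
  //   cmpptr(rax, rbx);
  //   testptr(rax, rax);           // common null-test idiom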

  // NOTE: src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2, Register rscratch);

  void cmpptr(Register src1, AddressLiteral src2, Register rscratch = noreg);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src, Register rscratch = noreg);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

  void imulptr(Register dst, Register src)            { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src)  { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)); }
  void xchgptr(Register src1, Address src2)  { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)); }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)); }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch = noreg);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register rscratch = noreg);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register rscratch = noreg);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register rscratch = noreg) { LP64_ONLY(atomic_incq(counter_addr, rscratch)) NOT_LP64(atomic_incl(counter_addr, rscratch)); }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)); }

  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr, Register rscratch);

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Address dst, int32_t imm32);
  void testl(Register dst, int32_t imm32);
  void testl(Register dst, AddressLiteral src); // requires reachable address
  using Assembler::testq;
  void testq(Address dst, int32_t imm32);
  void testq(Register dst, int32_t imm32);

  void orptr(Register dst, Address src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src)  { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Address src, int32_t imm32)  { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src)  { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry, Register rscratch = rax);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  static int ic_check_size();
  int ic_check(int end_alignment);

  void emit_static_call_stub();

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst, Register rscratch = noreg);

  void jump_cc(Condition cc, AddressLiteral dst, Register rscratch = noreg);
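
  // Illustrative sketch (not from the original sources): jumping to an external
  // runtime stub through an AddressLiteral, with 'stub_entry' a made-up address:
  //
  //   jump(RuntimeAddress(stub_entry));
  //   jump_cc(Assembler::equal, RuntimeAddress(stub_entry));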

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry, Register rscratch);

  // Adding more natural conditional jump instructions
  void ALWAYSINLINE jo(Label& L, bool maybe_short = true) { jcc(Assembler::overflow, L, maybe_short); }
  void ALWAYSINLINE jno(Label& L, bool maybe_short = true) { jcc(Assembler::noOverflow, L, maybe_short); }
  void ALWAYSINLINE js(Label& L, bool maybe_short = true) { jcc(Assembler::negative, L, maybe_short); }
  void ALWAYSINLINE jns(Label& L, bool maybe_short = true) { jcc(Assembler::positive, L, maybe_short); }
  void ALWAYSINLINE je(Label& L, bool maybe_short = true) { jcc(Assembler::equal, L, maybe_short); }
  void ALWAYSINLINE jz(Label& L, bool maybe_short = true) { jcc(Assembler::zero, L, maybe_short); }
  void ALWAYSINLINE jne(Label& L, bool maybe_short = true) { jcc(Assembler::notEqual, L, maybe_short); }
  void ALWAYSINLINE jnz(Label& L, bool maybe_short = true) { jcc(Assembler::notZero, L, maybe_short); }
  void ALWAYSINLINE jb(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jnae(Label& L, bool maybe_short = true) { jcc(Assembler::below, L, maybe_short); }
  void ALWAYSINLINE jc(Label& L, bool maybe_short = true) { jcc(Assembler::carrySet, L, maybe_short); }
  void ALWAYSINLINE jnb(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jae(Label& L, bool maybe_short = true) { jcc(Assembler::aboveEqual, L, maybe_short); }
  void ALWAYSINLINE jnc(Label& L, bool maybe_short = true) { jcc(Assembler::carryClear, L, maybe_short); }
  void ALWAYSINLINE jbe(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE jna(Label& L, bool maybe_short = true) { jcc(Assembler::belowEqual, L, maybe_short); }
  void ALWAYSINLINE ja(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jnbe(Label& L, bool maybe_short = true) { jcc(Assembler::above, L, maybe_short); }
  void ALWAYSINLINE jl(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jnge(Label& L, bool maybe_short = true) { jcc(Assembler::less, L, maybe_short); }
  void ALWAYSINLINE jge(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jnl(Label& L, bool maybe_short = true) { jcc(Assembler::greaterEqual, L, maybe_short); }
  void ALWAYSINLINE jle(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jng(Label& L, bool maybe_short = true) { jcc(Assembler::lessEqual, L, maybe_short); }
  void ALWAYSINLINE jg(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jnle(Label& L, bool maybe_short = true) { jcc(Assembler::greater, L, maybe_short); }
  void ALWAYSINLINE jp(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jpe(Label& L, bool maybe_short = true) { jcc(Assembler::parity, L, maybe_short); }
  void ALWAYSINLINE jnp(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  void ALWAYSINLINE jpo(Label& L, bool maybe_short = true) { jcc(Assembler::noParity, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jcxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
  // * No condition for this *  void ALWAYSINLINE jecxz(Label& L, bool maybe_short = true) { jcc(Assembler::cxz, L, maybe_short); }
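
  // Illustrative sketch (not from the original sources): the aliases above let
  // branch sequences read like the assembly they emit:
  //
  //   cmpl(rax, 0);
  //   jl(L_negative);    // jcc(Assembler::less, L_negative)
  //   jne_b(L_nonzero);  // short (8-bit displacement) forward branch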

  // Short versions of the above
  void ALWAYSINLINE jo_b(Label& L) { jccb(Assembler::overflow, L); }
  void ALWAYSINLINE jno_b(Label& L) { jccb(Assembler::noOverflow, L); }
  void ALWAYSINLINE js_b(Label& L) { jccb(Assembler::negative, L); }
  void ALWAYSINLINE jns_b(Label& L) { jccb(Assembler::positive, L); }
  void ALWAYSINLINE je_b(Label& L) { jccb(Assembler::equal, L); }
  void ALWAYSINLINE jz_b(Label& L) { jccb(Assembler::zero, L); }
  void ALWAYSINLINE jne_b(Label& L) { jccb(Assembler::notEqual, L); }
  void ALWAYSINLINE jnz_b(Label& L) { jccb(Assembler::notZero, L); }
  void ALWAYSINLINE jb_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jnae_b(Label& L) { jccb(Assembler::below, L); }
  void ALWAYSINLINE jc_b(Label& L) { jccb(Assembler::carrySet, L); }
  void ALWAYSINLINE jnb_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jae_b(Label& L) { jccb(Assembler::aboveEqual, L); }
  void ALWAYSINLINE jnc_b(Label& L) { jccb(Assembler::carryClear, L); }
  void ALWAYSINLINE jbe_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE jna_b(Label& L) { jccb(Assembler::belowEqual, L); }
  void ALWAYSINLINE ja_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jnbe_b(Label& L) { jccb(Assembler::above, L); }
  void ALWAYSINLINE jl_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jnge_b(Label& L) { jccb(Assembler::less, L); }
  void ALWAYSINLINE jge_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jnl_b(Label& L) { jccb(Assembler::greaterEqual, L); }
  void ALWAYSINLINE jle_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jng_b(Label& L) { jccb(Assembler::lessEqual, L); }
  void ALWAYSINLINE jg_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jnle_b(Label& L) { jccb(Assembler::greater, L); }
  void ALWAYSINLINE jp_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jpe_b(Label& L) { jccb(Assembler::parity, L); }
  void ALWAYSINLINE jnp_b(Label& L) { jccb(Assembler::noParity, L); }
  void ALWAYSINLINE jpo_b(Label& L) { jccb(Assembler::noParity, L); }
  // * No condition for this *  void ALWAYSINLINE jcxz_b(Label& L) { jccb(Assembler::cxz, L); }
  // * No condition for this *  void ALWAYSINLINE jecxz_b(Label& L) { jccb(Assembler::cxz, L); }

  // Floating

  void push_f(XMMRegister r);
  void pop_f(XMMRegister r);
  void push_d(XMMRegister r);
  void pop_d(XMMRegister r);

  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src)        { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src) { Assembler::fld_x(as_Address(src)); }

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src, Register rscratch = noreg);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,  /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,  /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,  /* ymm6 */
    XMMRegister xmm_3,  /* ymm7 */
    Register    reg_a,  /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,  /* ebx */  /* full cycle is 8 iterations */
    Register    reg_c,  /* edi */
    Register    reg_d,  /* esi */
    Register    reg_e,  /* r8d */
    Register    reg_f,  /* r9d */
    Register    reg_g,  /* r10d */
    Register    reg_h,  /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif // _LP64

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifndef _LP64
 private:
  // Initialized in macroAssembler_x86_constants.cpp
  static address ONES;
  static address L_2IL0FLOATPACKET_0;
  static address PI4_INV;
  static address PI4X3;
  static address PI4X4;

 public:
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif // !_LP64

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vbroadcastsd;
  void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vbroadcastss;
  void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector float blend
  void vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);
  void vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovwl(Register dst,  KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(Address dst,   KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Register src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src)  { Assembler::kmovql(dst, src); }
  void kmovql(Register dst,  KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src)   { Assembler::kmovql(dst, src); }
  void kmovql(Address dst,   KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Safe move operation: lowers to 16-bit moves on targets that support only
  // the AVX512F feature, and to 64-bit moves on targets that also support AVX512BW.
  void kmov(Address dst,   KRegister src);
  void kmov(KRegister dst, Address src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register dst,  KRegister src);
  void kmov(KRegister dst, Register src);
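
  // Illustrative (hypothetical) usage: a stub that spills a mask register can
  // use kmov unconditionally; per the comment above it is emitted as kmovwl on
  // AVX512F-only hardware and as kmovql when AVX512BW is available:
  //   __ kmov(Address(rsp, 0), k1);   // spill
  //   __ kmov(k1, Address(rsp, 0));   // reload
  // where '__' is the usual masm-> shorthand.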
  using Assembler::movddup;
  void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::vmovddup;
  void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
  void vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);

  void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdqub(dst, src, vector_len); }
  void evmovdqub(XMMRegister dst, Address src, int vector_len)     { Assembler::evmovdqub(dst, src, vector_len); }

  void evmovdqub(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    // Elide the move when it would be a no-op: same source and destination
    // register with no mask in effect (the same pattern is used by the other
    // evmovdqu* register-to-register wrappers below).
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqub(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquw(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquw(dst, src, vector_len); }

  void evmovdquw(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquw(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdqul(dst, src, vector_len);
    }
  }
  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }

  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdqul(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() != src->encoding()) {
      Assembler::evmovdquq(dst, src, vector_len);
    }
  }
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() != src->encoding() || mask != k0) {
      Assembler::evmovdquq(dst, mask, src, merge, vector_len);
    }
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }
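
  // Note (informal): in the pclmulqdq immediate, bit 0 selects the low or
  // high quadword of the destination operand and bit 4 selects the low or
  // high quadword of the source, so 0x00 multiplies the two low halves and
  // 0x11 the two high halves; the mixed selectors 0x10 and 0x01 are wrapped
  // by the vpclmul* helpers further below.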
  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch = noreg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch = noreg);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastd;
  void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpbroadcastq;
  void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Vector compares
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, int comparison, bool is_signed, int vector_len) {
    Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len);
  }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int comparison, bool is_signed, int vector_len, Register rscratch = noreg);

  void evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len);

  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpmulld(dst, nds, src, vector_len); }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  // Masked shift wrappers: when 'is_varshift' is set, emit the per-element
  // variable-shift form (evpsllv*/evpsrlv*/evpsrav*) instead of the
  // uniform-count form.
  void evpsllw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpslld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpslld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvd(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsllq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsllq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsllvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrlw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrld(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrld(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvd(dst, mask, nds, src, merge, vector_len);
    }
  }

  using Assembler::evpsrlq;
  void evpsrlq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrlq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsrlvq(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsraw(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraw(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravw(dst, mask, nds, src, merge, vector_len);
    }
  }
  void evpsrad(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsrad(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravd(dst, mask, nds, src, merge, vector_len);
    }
  }
  using Assembler::evpsraq;
  void evpsraq(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len, bool is_varshift) {
    if (!is_varshift) {
      Assembler::evpsraq(dst, mask, nds, src, merge, vector_len);
    } else {
      Assembler::evpsravq(dst, mask, nds, src, merge, vector_len);
    }
  }

  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);
  void evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch = noreg);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
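
  // Informal note: the vxorpd fallback above is bit-identical to a 256-bit
  // integer xor; only the execution domain (FP vs. integer) differs, so
  // AVX1-only targets still get a correct result. E.g. a hypothetical caller
  // zeroing a YMM register,
  //   __ vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);
  // emits vpxor under AVX2 and vxorpd otherwise.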
  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
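
  // Informal usage sketch: thanks to the dispatch above, a caller can move a
  // 128-bit lane unconditionally on any AVX-capable target, e.g.
  //   __ vinserti128(xmm0, xmm0, xmm1, 1);   // xmm1 -> high lane of ymm0
  // which becomes vinserti32x4 on AVX-512 without VL, vinserti128 on AVX2,
  // and the float-domain vinsertf128 on AVX1.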
  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src)
  {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }
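
  // Informal background: the four selectors above compose a full 128x128-bit
  // carry-less product from 64x64-bit halves. Writing an operand over GF(2)
  // as a0 + a1*x^64,
  //   (a0 + a1*x^64) * (b0 + b1*x^64)
  //     = a0*b0 + (a0*b1 + a1*b0)*x^64 + a1*b1*x^128,
  // i.e. selectors 0x00, 0x10/0x01 and 0x11 respectively; the CRC folding
  // code declared further below combines partial products this way.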
  // AVX-512 mask operations.
  void kand(BasicType etype, KRegister dst, KRegister src1, KRegister src2);
  void kor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp = knoreg, Register rtmp = noreg);
  void kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2);
  void kortest(uint masklen, KRegister src1, KRegister src2);
  void ktest(uint masklen, KRegister src1, KRegister src2);

  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len);
  void evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len);

  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc);
  void evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc);

  using Assembler::evpandq;
  void evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::evpaddq;
  void evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch = noreg);

  using Assembler::evporq;
  void evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpshufb;
  void vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpor;
  void vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch = noreg);

  using Assembler::vpternlogq;
  void vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch = noreg);

  void cmov32( Condition cc, Register dst, Address  src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address  dst, jobject obj, Register rscratch);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address  dst, Metadata* obj, Register rscratch);

  void movptr(Register     dst, Register       src);
  void movptr(Register     dst, Address        src);
  void movptr(Register     dst, AddressLiteral src);
  void movptr(Register     dst, ArrayAddress   src);
  void movptr(Register     dst, intptr_t       src);
  void movptr(Address      dst, Register       src);
  void movptr(Address      dst, int32_t        imm);
  void movptr(Address      dst, intptr_t       src, Register rscratch);
  void movptr(ArrayAddress dst, Register       src, Register rscratch);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }


  // to avoid hiding movl
  void mov32(Register       dst, AddressLiteral src);
  void mov32(AddressLiteral dst, Register       src, Register rscratch = noreg);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
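  // Informal note on why the using-declarations are needed: in C++, an
  // overload declared in a derived class hides every base-class overload with
  // the same name, so without 'using Assembler::movdl;' a call such as
  //   __ movdl(xmm0, rax);   // hypothetical caller
  // would fail to resolve through MacroAssembler even though Assembler
  // provides that overload; the using-declaration re-exposes it.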
  using Assembler::movdl;
  void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  using Assembler::movq;
  void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);

  // Can push value or effective address
  void pushptr(AddressLiteral src, Register rscratch);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj, Register rscratch);
  void pushklass(Metadata* obj, Register rscratch);

  // Sign-extend a 32-bit value (l) to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }


 public:
  // Inline type specific methods
  #include "asm/macroAssembler_common.hpp"

  int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
  bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
  bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
                            VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
                            RegState reg_state[]);
  bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                          VMRegPair* from, int from_count, int& from_index, VMReg to,
                          RegState reg_state[], Register val_array);
  int extend_stack_for_inline_args(int args_on_stack);
  void remove_frame(int initial_framesize, bool needs_stack_repair);
  VMReg spill_reg_for(VMReg reg);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask = knoreg);

  // clear memory initialization sequence for constant size
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx,
                               Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
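
  // Informal background on the fold_* helpers: CRC folding multiplies the
  // accumulated remainder by a precomputed constant (x^N mod P, held in xK)
  // using carry-less multiplies and xors in the next data chunk, shrinking a
  // long message N bits at a time before a final Barrett reduction (see the
  // L_barrett label in the AVX-512 variant above).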
  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask,
                   Register length, Register temp, int vec_enc);

  void fill64_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp, bool use64byteVector = false);

  void fill32_masked(uint shift, Register dst, int disp,
                     XMMRegister xmm, KRegister mask, Register length,
                     Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);
  void round_double(Register dst, XMMRegister src, Register rtmp, Register rcx);
  void round_float(Register dst, XMMRegister src, Register rtmp, Register rcx);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#ifdef COMPILER2_OR_JVMCI
  void generate_fill_avx3(BasicType type, Register to, Register value,
                          Register count, Register rtmp, XMMRegister xtmp);
#endif // COMPILER2_OR_JVMCI
#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);

  void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);

  void lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
  void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);

#ifdef _LP64
  void save_legacy_gprs();
  void restore_legacy_gprs();
#endif
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that
 * jumps around any code emitted between the creation of the instance and its
 * automatic destruction at the end of the enclosing scope, depending on the
 * value of the flag passed to the constructor, which is checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register rscratch);
  ~SkipIfEqual();
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP