/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "code/vmreg.inline.hpp"
#include "compiler/oopMap.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/vm_version.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );
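  // Illustrative sketch (not part of the original header): calls from generated
  // code into the VM go through the call_VM overloads below, which funnel into
  // call_VM_base. Following the usual HotSpot "__" masm convention; the runtime
  // entry name is a hypothetical placeholder:
  //
  //   __ call_VM(rax,                                            // oop result, if any
  //              CAST_FROM_FN_PTR(address, some_runtime_entry),  // hypothetical entry
  //              rbx);                                           // one argument
  //
  // call_VM_base sets up last_Java_frame, passes the current thread as the
  // implicit first argument, and optionally checks for pending exceptions.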
  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  // Move with zero extension
  void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);
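  // Illustrative note (not part of the original header): on 64-bit the
  // JavaThread pointer is conventionally kept in r15_thread, so get_thread is
  // mostly needed on 32-bit, e.g. in a stub prologue (register choice
  // hypothetical):
  //
  //   NOT_LP64(__ get_thread(rdi));
  //   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);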
#ifdef _LP64
  // Support for argument shuffling

  void move32_64(VMRegPair src, VMRegPair dst);
  void long_move(VMRegPair src, VMRegPair dst);
  void float_move(VMRegPair src, VMRegPair dst);
  void double_move(VMRegPair src, VMRegPair dst);
  void move_ptr(VMRegPair src, VMRegPair dst);
  void object_move(OopMap* map,
                   int oop_handle_offset,
                   int framesize_in_slots,
                   VMRegPair src,
                   VMRegPair dst,
                   bool is_receiver,
                   int* receiver_offset);
#endif // _LP64

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);
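  // Illustrative sketch (not part of the original header): leaf calls skip the
  // last_Java_frame setup and must target LEAF runtime entries; the entry name
  // below is hypothetical:
  //
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_entry), rax, rbx);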
  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, DecoratorSet decorators = 0);
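  // Illustrative sketch (not part of the original header): heap accesses go
  // through these helpers so the active GC barrier set can decorate them; the
  // field offset below is hypothetical:
  //
  //   __ load_heap_oop(rax, Address(rsi, some_field_offset));   // handles compressed oops + barriers
  //   __ store_heap_oop(Address(rsi, some_field_offset), rax);  // emits pre/post barriers as needed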
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
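  // Illustrative note (not part of the original header): the "special case" is
  // min_int / -1, which idivl would fault on; corrected_idivl materializes the
  // JVM-specified result (min_int, remainder 0) instead. A typical use for the
  // idiv/irem bytecodes, with the dividend in rax:
  //
  //   int idivl_offset = __ corrected_idivl(rcx);  // quotient in rax, remainder in rdx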
  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);
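  // Illustrative sketch (not part of the original header): these helpers hide
  // the UseSSE split, so callers can move Java doubles without caring whether
  // the value travels in xmm0 or on the x87 stack (offsets hypothetical):
  //
  //   __ load_double(Address(rsp, src_offset));
  //   __ store_double(Address(rsp, dst_offset));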
#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
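  // Illustrative sketch (not part of the original header): a typical
  // instanceof-style check uses the combined helper and handles failure on
  // the fall-through path (register assignment hypothetical):
  //
  //   Label L_ok;
  //   __ check_klass_subtype(rdx /* sub */, rax /* super */, rcx /* temp */, L_ok);
  //   // fall-through: not a subtype; branch to failure/slow handling here
  //   __ bind(L_ok);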
  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  void _verify_oop_checked(Register reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop(reg, s, file, line);
    }
  }
  void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) {
    if (VerifyOops) {
      _verify_oop_addr(reg, s, file, line);
    }
  }

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);
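  // Illustrative sketch (not part of the original header): frame setup
  // typically bangs one page at a time out to the frame size plus the shadow
  // zone (loop bound hypothetical):
  //
  //   for (int i = 0; i < bang_pages; i++) {
  //     __ bang_stack_with_offset((i + 1) * os::vm_page_size());
  //   }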
  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  void biased_locking_enter(Register lock_reg, Register obj_reg,
                            Register swap_reg, Register tmp_reg,
                            Register tmp_reg2, bool swap_reg_contains_mark,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);
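  // Illustrative sketch (not part of the original header): the *ptr helpers
  // pick the pointer-width instruction at compile time, so shared code stays
  // portable across 32/64-bit:
  //
  //   __ addptr(rsp, wordSize);              // addq on 64-bit, addl on 32-bit
  //   __ cmpptr(rax, (int32_t) NULL_WORD);   // pointer-width compare against NULL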
  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
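  // Illustrative sketch (not part of the original header): cond_inc32 is handy
  // for flag-preserving instrumentation, e.g. counting taken branches (counter
  // symbol hypothetical):
  //
  //   __ cond_inc32(Assembler::equal, ExternalAddress((address) &some_counter));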
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);
  void call(Address addr) { Assembler::call(addr); }

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);
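  // Illustrative sketch (not part of the original header): AddressLiteral
  // jumps go to the literal address itself, with relocation, e.g. into a stub
  // (the conditional target below is hypothetical):
  //
  //   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  //   __ jump_cc(Assembler::notEqual, RuntimeAddress(some_stub_entry));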
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
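  // Illustrative note (not part of the original header): the AddressLiteral
  // overloads above let code reference VM-global data with proper relocation,
  // e.g. restoring the standard MXCSR from a known constant (accessor name
  // from memory; treat as an assumption):
  //
  //   __ ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()));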
#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */  /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);

 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

#endif

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);
  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);
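  // Illustrative note (not part of the original header): keeping raw
  // movss/movlpd private steers callers to movflt/movdbl, which pick the move
  // flavor (movaps/movss, movapd/movsd/movlpd) from the CPU tuning flags:
  //
  //   __ movdbl(xmm0, Address(rsp, 0));   // instead of raw movsd/movlpd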
 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);

  void kmovwl(KRegister dst, Register src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(Register dst, KRegister src)  { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, Address src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void kmovwl(Address dst, KRegister src)   { Assembler::kmovwl(dst, src); }
  void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); }

  void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Register src)  { Assembler::kmovql(dst, src); }
  void kmovql(Register dst, KRegister src)  { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, Address src)   { Assembler::kmovql(dst, src); }
  void kmovql(Address dst, KRegister src)   { Assembler::kmovql(dst, src); }
  void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Safe move operation, lowers down to 16bit moves for targets supporting
  // AVX512F feature and 64bit moves for targets supporting AVX512BW feature.
  void kmov(Address dst, KRegister src);
  void kmov(KRegister dst, Address src);
  void kmov(KRegister dst, KRegister src);
  void kmov(Register dst, KRegister src);
  void kmov(KRegister dst, Register src);
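  // Illustrative note (not part of the original header): kmov hides the
  // mask-width question, so feature-generic vector code can move opmasks
  // without checking AVX512BW itself:
  //
  //   __ kmov(k1, rax);   // 16-bit kmovw or 64-bit kmovq, per CPU features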
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // AVX512 Unaligned
  void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len);
  void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len);

  void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); }
  void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
  void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); }
  void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); }
  void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() == src->encoding()) return;
    Assembler::evmovdqul(dst, src, vector_len);
  }
  void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); }
  void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() == src->encoding() && mask == k0) return;
    Assembler::evmovdqul(dst, mask, src, merge, vector_len);
  }
  void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
    if (dst->encoding() == src->encoding()) return;
    Assembler::evmovdquq(dst, src, vector_len);
  }
  void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); }
  void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) {
    if (dst->encoding() == src->encoding() && mask == k0) return;
    Assembler::evmovdquq(dst, mask, src, merge, vector_len);
  }
  void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)     { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
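  // Illustrative note (not part of the original header): pclmulldq/pclmulhdq
  // name the two pclmulqdq selectors used by carry-less (GF(2)[x])
  // multiplication, as in GHASH-style folding:
  //
  //   __ pclmulldq(xmm1, xmm2);   // low  64x64 -> 128-bit product
  //   __ pclmulhdq(xmm3, xmm2);   // high 64x64 -> 128-bit product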

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);

  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
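
  // A minimal sketch, assuming the usual SSE sign-mask implementation (the
  // mask constants below are illustrative; the actual constant is supplied by
  // the caller through 'negate_field'):
  //
  //   abs(x)    ~ andps(x, mask_with_sign_bit_clear)  // 0x7FFFFFFF per lane
  //   negate(x) ~ xorps(x, mask_with_sign_bit_set)    // 0x80000000 per lane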

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  // Vector compares
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src,
               int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); }
  void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src,
               int comparison, bool is_signed, int vector_len, Register scratch_reg);
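
  // Usage sketch (illustrative): 'comparison' is the EVEX integer-compare
  // predicate encoding (0 = eq, 1 = lt, 2 = le, 4 = neq, 5 = not-lt/ge,
  // 6 = not-le/gt). A signed 32-bit "less-or-equal" compare of two 512-bit
  // vectors into mask k1, with no write-mask, might look like:
  //
  //   evpcmpd(k1, k0, xmm0, xmm1, 2 /* le */, /* is_signed */ true, Assembler::AVX_512bit);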

  // Emit comparison instruction for the specified comparison predicate.
  void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg);
  void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
  void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    Assembler::vpmulld(dst, nds, src, vector_len);
  }
  void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    Assembler::vpmulld(dst, nds, src, vector_len);
  }
  void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);
  void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); }

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
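
  // Illustrative note: the 'mode' immediate of pshufd/pshuflw packs four
  // 2-bit source-element indices, lowest destination element first. E.g.:
  //
  //   pshufd(xmm0, xmm1, 0x1B);  // 0x1B = 0b00'01'10'11 selects elements
  //                              // 3,2,1,0 - reverses the four dwords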

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
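
  // Design note: on AVX1-only hardware the vpxor wrappers above fall back to
  // vxorpd for 256-bit operands, where the integer encoding does not exist;
  // the bitwise result is identical (at worst a float/int domain-crossing
  // penalty). So the usual zeroing idiom is safe on both AVX1 and AVX2:
  //
  //   vpxor(xmm0, xmm0, xmm0, Assembler::AVX_256bit);  // ymm0 = 0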

  // Simple version for AVX2 256-bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }
  void vpxor(XMMRegister dst, Address src) {
    assert(UseAVX >= 2, "Should be at least AVX2");
    Assembler::vpxor(dst, dst, src, AVX_256bit);
  }

  void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); }
  void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg);

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, nds, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
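
  // Usage sketch (illustrative; registers and addressing arbitrary): building
  // a 256-bit value from two 128-bit halves with the dispatching wrapper
  // above, which picks vinserti32x4 / vinserti128 / vinsertf128 to match the
  // CPU's AVX level:
  //
  //   movdqu(xmm0, Address(rsi, 0));                 // low 128 bits
  //   vinserti128(xmm0, xmm0, Address(rsi, 16), 1);  // high 128 bits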

  // 128-bit copy to/from high 128 bits of 256-bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256-bit copy to/from high 256 bits of 512-bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128-bit copy to/from low 128 bits of 256-bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256-bit copy to/from low 256 bits of 512-bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
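
  // Usage sketch (illustrative): a common way to use the lane-copy helpers
  // above is to fold the high 128-bit lane onto the low one, then finish a
  // reduction with plain 128-bit instructions, e.g. for packed ints in ymm0:
  //
  //   vextracti128_high(xmm1, xmm0);  // xmm1 = upper lane of ymm0
  //   paddd(xmm0, xmm1);              // vertical sum; continue at 128 bits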

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }

  // Data

  void cmov32(Condition cc, Register dst, Address src);
  void cmov32(Condition cc, Register dst, Register src);

  void cmov(Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src)  { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch = rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch = noreg); // the scratch register is ignored on 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations (e.g., initializing the mark word) where they could be used.
  // They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);
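
  // Illustrative note: the *ptr flavours in this section dispatch on word size
  // via the LP64_ONLY/NOT_LP64 macros, so callers stay word-size agnostic.
  // For example, on a 64-bit VM
  //
  //   cmovptr(Assembler::notZero, rax, rbx);
  //
  // expands to cmovq(notZero, rax, rbx), while a 32-bit VM emits
  // cmov32(notZero, rax, rbx).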

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declarations.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign-extend as needed from a 32-bit (l) to a pointer-sized element
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

public:
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);

  // Clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop.
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask = knoreg);

  // Clear-memory initialization sequence for a constant size.
  void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers.
  void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask = knoreg);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result, bool ascii);
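
  // Usage sketch (illustrative; register assignments are arbitrary): zeroing a
  // block whose size in qwords is already in rcx, using the variable-count
  // clear_mem() above:
  //
  //   clear_mem(rdi /* base */, rcx /* cnt */, rax /* rtmp */,
  //             xmm0 /* xtmp */, false /* is_large */);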

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
  void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic.
  // Note on the naming convention:
  //   Prefix w = register only used on a Westmere+ architecture
  //   Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                       Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
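
  // Background note (informal summary of the standard PCLMULQDQ folding
  // scheme, cf. Intel's "Fast CRC Computation Using PCLMULQDQ" paper): the
  // 128-bit folding step below consumes input a chunk at a time by multiplying
  // the running remainder with precomputed constants K = x^t mod P(x) over
  // GF(2), roughly
  //
  //   crc' = (crc.lo * K1) ^ (crc.hi * K2) ^ next_chunk
  //
  // realized with carry-less multiplies (pclmulqdq) and XORs.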

  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result,
                           KRegister mask1 = knoreg, KRegister mask2 = knoreg);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2, KRegister mask = knoreg);

  void fill64_masked_avx(uint shift, Register dst, int disp,
                         XMMRegister xmm, KRegister mask, Register length,
                         Register temp, bool use64byteVector = false);

  void fill32_masked_avx(uint shift, Register dst, int disp,
                         XMMRegister xmm, KRegister mask, Register length,
                         Register temp);

  void fill32(Address dst, XMMRegister xmm);

  void fill32_avx(Register dst, int disp, XMMRegister xmm);

  void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false);

  void fill64_avx(Register dst, int disp, XMMRegister xmm, bool use64byteVector = false);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);

#if COMPILER2_OR_JVMCI
  void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
                                    Register to, Register count, int shift,
                                    Register index, Register temp,
                                    bool use64byteVector, Label& L_entry, Label& L_exit);

  void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from,
                                             Register to, Register start_index, Register end_index,
                                             Register count, int shift, Register temp,
                                             bool use64byteVector, Label& L_entry, Label& L_exit);

  void copy64_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0,
                         bool use64byteVector = false);

  void copy32_masked_avx(Register dst, Register src, XMMRegister xmm,
                         KRegister mask, Register length, Register index,
                         Register temp, int shift = Address::times_1, int offset = 0);

  void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  int shift = Address::times_1, int offset = 0);

  void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm,
                  bool conjoint, int shift = Address::times_1, int offset = 0,
                  bool use64byteVector = false);
#endif // COMPILER2_OR_JVMCI

#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of the enclosing scope, depending on the
 * value of the flag passed to the constructor, which is checked at run time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
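
// Usage sketch (illustrative; 'masm' and the flag are hypothetical): the
// guarded code is always emitted, but is jumped over at run time whenever the
// watched flag equals the 'value' passed to the constructor:
//
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // ... emitted code that only runs when SomeBoolFlag is true ...
//   }  // destructor binds the skip-target label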

#endif // CPU_X86_MACROASSEMBLER_X86_HPP