1 /* 2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP 26 #define CPU_X86_MACROASSEMBLER_X86_HPP 27 28 #include "asm/assembler.hpp" 29 #include "code/vmreg.inline.hpp" 30 #include "compiler/oopMap.hpp" 31 #include "utilities/macros.hpp" 32 #include "runtime/rtmLocking.hpp" 33 #include "runtime/vm_version.hpp" 34 35 // MacroAssembler extends Assembler by frequently used macros. 36 // 37 // Instructions for which a 'better' code sequence exists depending 38 // on arguments should also go in here. 39 40 class MacroAssembler: public Assembler { 41 friend class LIR_Assembler; 42 friend class Runtime1; // as_Address() 43 44 public: 45 // Support for VM calls 46 // 47 // This is the base routine called by the different versions of call_VM_leaf. The interpreter 48 // may customize this version by overriding it for its purposes (e.g., to save/restore 49 // additional registers when doing a VM call). 50 51 virtual void call_VM_leaf_base( 52 address entry_point, // the entry point 53 int number_of_arguments // the number of arguments to pop after the call 54 ); 55 56 protected: 57 // This is the base routine called by the different versions of call_VM. The interpreter 58 // may customize this version by overriding it for its purposes (e.g., to save/restore 59 // additional registers when doing a VM call). 60 // 61 // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base 62 // returns the register which contains the thread upon return. If a thread register has been 63 // specified, the return value will correspond to that register. If no last_java_sp is specified 64 // (noreg) than rsp will be used instead. 65 virtual void call_VM_base( // returns the register containing the thread upon return 66 Register oop_result, // where an oop-result ends up if any; use noreg otherwise 67 Register java_thread, // the thread if computed before ; use noreg otherwise 68 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise 69 address entry_point, // the entry point 70 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call 71 bool check_exceptions // whether to check for pending exceptions after return 72 ); 73 74 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); 75 76 // helpers for FPU flag access 77 // tmp is a temporary register, if none is available use noreg 78 void save_rax (Register tmp); 79 void restore_rax(Register tmp); 80 81 public: 82 MacroAssembler(CodeBuffer* code) : Assembler(code) {} 83 84 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. 85 // The implementation is only non-empty for the InterpreterMacroAssembler, 86 // as only the interpreter handles PopFrame and ForceEarlyReturn requests. 87 virtual void check_and_handle_popframe(Register java_thread); 88 virtual void check_and_handle_earlyret(Register java_thread); 89 90 Address as_Address(AddressLiteral adr); 91 Address as_Address(ArrayAddress adr); 92 93 // Support for NULL-checks 94 // 95 // Generates code that causes a NULL OS exception if the content of reg is NULL. 96 // If the accessed location is M[reg + offset] and the offset is known, provide the 97 // offset. No explicit code generation is needed if the offset is within a certain 98 // range (0 <= offset <= page_size). 99 100 void null_check(Register reg, int offset = -1); 101 static bool needs_explicit_null_check(intptr_t offset); 102 static bool uses_implicit_null_check(void* address); 103 104 // Required platform-specific helpers for Label::patch_instructions. 105 // They _shadow_ the declarations in AbstractAssembler, which are undefined. 106 void pd_patch_instruction(address branch, address target, const char* file, int line) { 107 unsigned char op = branch[0]; 108 assert(op == 0xE8 /* call */ || 109 op == 0xE9 /* jmp */ || 110 op == 0xEB /* short jmp */ || 111 (op & 0xF0) == 0x70 /* short jcc */ || 112 op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ || 113 op == 0xC7 && branch[1] == 0xF8 /* xbegin */, 114 "Invalid opcode at patch point"); 115 116 if (op == 0xEB || (op & 0xF0) == 0x70) { 117 // short offset operators (jmp and jcc) 118 char* disp = (char*) &branch[1]; 119 int imm8 = target - (address) &disp[1]; 120 guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", 121 file == NULL ? "<NULL>" : file, line); 122 *disp = imm8; 123 } else { 124 int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1]; 125 int imm32 = target - (address) &disp[1]; 126 *disp = imm32; 127 } 128 } 129 130 // The following 4 methods return the offset of the appropriate move instruction 131 132 // Support for fast byte/short loading with zero extension (depending on particular CPU) 133 int load_unsigned_byte(Register dst, Address src); 134 int load_unsigned_short(Register dst, Address src); 135 136 // Support for fast byte/short loading with sign extension (depending on particular CPU) 137 int load_signed_byte(Register dst, Address src); 138 int load_signed_short(Register dst, Address src); 139 140 // Support for sign-extension (hi:lo = extend_sign(lo)) 141 void extend_sign(Register hi, Register lo); 142 143 // Load and store values by size and signed-ness 144 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); 145 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); 146 147 // Support for inc/dec with optimal instruction selection depending on value 148 149 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } 150 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } 151 152 void decrementl(Address dst, int value = 1); 153 void decrementl(Register reg, int value = 1); 154 155 void decrementq(Register reg, int value = 1); 156 void decrementq(Address dst, int value = 1); 157 158 void incrementl(Address dst, int value = 1); 159 void incrementl(Register reg, int value = 1); 160 161 void incrementq(Register reg, int value = 1); 162 void incrementq(Address dst, int value = 1); 163 164 // Support optimal SSE move instructions. 165 void movflt(XMMRegister dst, XMMRegister src) { 166 if (dst-> encoding() == src->encoding()) return; 167 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } 168 else { movss (dst, src); return; } 169 } 170 void movflt(XMMRegister dst, Address src) { movss(dst, src); } 171 void movflt(XMMRegister dst, AddressLiteral src); 172 void movflt(Address dst, XMMRegister src) { movss(dst, src); } 173 174 // Move with zero extension 175 void movfltz(XMMRegister dst, XMMRegister src) { movss(dst, src); } 176 177 void movdbl(XMMRegister dst, XMMRegister src) { 178 if (dst-> encoding() == src->encoding()) return; 179 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } 180 else { movsd (dst, src); return; } 181 } 182 183 void movdbl(XMMRegister dst, AddressLiteral src); 184 185 void movdbl(XMMRegister dst, Address src) { 186 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } 187 else { movlpd(dst, src); return; } 188 } 189 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } 190 191 void incrementl(AddressLiteral dst); 192 void incrementl(ArrayAddress dst); 193 194 void incrementq(AddressLiteral dst); 195 196 // Alignment 197 void align32(); 198 void align64(); 199 void align(int modulus); 200 void align(int modulus, int target); 201 202 // A 5 byte nop that is safe for patching (see patch_verified_entry) 203 void fat_nop(); 204 205 // Stack frame creation/removal 206 void enter(); 207 void leave(); 208 209 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) 210 // The pointer will be loaded into the thread register. 211 void get_thread(Register thread); 212 213 #ifdef _LP64 214 // Support for argument shuffling 215 216 void move32_64(VMRegPair src, VMRegPair dst); 217 void long_move(VMRegPair src, VMRegPair dst); 218 void float_move(VMRegPair src, VMRegPair dst); 219 void double_move(VMRegPair src, VMRegPair dst); 220 void move_ptr(VMRegPair src, VMRegPair dst); 221 void object_move(OopMap* map, 222 int oop_handle_offset, 223 int framesize_in_slots, 224 VMRegPair src, 225 VMRegPair dst, 226 bool is_receiver, 227 int* receiver_offset); 228 #endif // _LP64 229 230 // Support for VM calls 231 // 232 // It is imperative that all calls into the VM are handled via the call_VM macros. 233 // They make sure that the stack linkage is setup correctly. call_VM's correspond 234 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 235 236 237 void call_VM(Register oop_result, 238 address entry_point, 239 bool check_exceptions = true); 240 void call_VM(Register oop_result, 241 address entry_point, 242 Register arg_1, 243 bool check_exceptions = true); 244 void call_VM(Register oop_result, 245 address entry_point, 246 Register arg_1, Register arg_2, 247 bool check_exceptions = true); 248 void call_VM(Register oop_result, 249 address entry_point, 250 Register arg_1, Register arg_2, Register arg_3, 251 bool check_exceptions = true); 252 253 // Overloadings with last_Java_sp 254 void call_VM(Register oop_result, 255 Register last_java_sp, 256 address entry_point, 257 int number_of_arguments = 0, 258 bool check_exceptions = true); 259 void call_VM(Register oop_result, 260 Register last_java_sp, 261 address entry_point, 262 Register arg_1, bool 263 check_exceptions = true); 264 void call_VM(Register oop_result, 265 Register last_java_sp, 266 address entry_point, 267 Register arg_1, Register arg_2, 268 bool check_exceptions = true); 269 void call_VM(Register oop_result, 270 Register last_java_sp, 271 address entry_point, 272 Register arg_1, Register arg_2, Register arg_3, 273 bool check_exceptions = true); 274 275 void get_vm_result (Register oop_result, Register thread); 276 void get_vm_result_2(Register metadata_result, Register thread); 277 278 // These always tightly bind to MacroAssembler::call_VM_base 279 // bypassing the virtual implementation 280 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); 281 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); 282 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); 283 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); 284 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); 285 286 void call_VM_leaf0(address entry_point); 287 void call_VM_leaf(address entry_point, 288 int number_of_arguments = 0); 289 void call_VM_leaf(address entry_point, 290 Register arg_1); 291 void call_VM_leaf(address entry_point, 292 Register arg_1, Register arg_2); 293 void call_VM_leaf(address entry_point, 294 Register arg_1, Register arg_2, Register arg_3); 295 296 // These always tightly bind to MacroAssembler::call_VM_leaf_base 297 // bypassing the virtual implementation 298 void super_call_VM_leaf(address entry_point); 299 void super_call_VM_leaf(address entry_point, Register arg_1); 300 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); 301 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); 302 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); 303 304 // last Java Frame (fills frame anchor) 305 void set_last_Java_frame(Register thread, 306 Register last_java_sp, 307 Register last_java_fp, 308 address last_java_pc); 309 310 // thread in the default location (r15_thread on 64bit) 311 void set_last_Java_frame(Register last_java_sp, 312 Register last_java_fp, 313 address last_java_pc); 314 315 void reset_last_Java_frame(Register thread, bool clear_fp); 316 317 // thread in the default location (r15_thread on 64bit) 318 void reset_last_Java_frame(bool clear_fp); 319 320 // jobjects 321 void clear_jweak_tag(Register possibly_jweak); 322 void resolve_jobject(Register value, Register thread, Register tmp); 323 324 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 325 void c2bool(Register x); 326 327 // C++ bool manipulation 328 329 void movbool(Register dst, Address src); 330 void movbool(Address dst, bool boolconst); 331 void movbool(Address dst, Register src); 332 void testbool(Register dst); 333 334 void resolve_oop_handle(Register result, Register tmp = rscratch2); 335 void resolve_weak_handle(Register result, Register tmp); 336 void load_mirror(Register mirror, Register method, Register tmp = rscratch2); 337 void load_method_holder_cld(Register rresult, Register rmethod); 338 339 void load_method_holder(Register holder, Register method); 340 341 // oop manipulations 342 void load_klass(Register dst, Register src, Register tmp, bool null_check_src = false); 343 #ifdef _LP64 344 void load_nklass(Register dst, Register src); 345 #endif 346 void store_klass(Register dst, Register src, Register tmp); 347 348 // Compares the Klass pointer of an object to a given Klass (which might be narrow, 349 // depending on UseCompressedClassPointers). 350 void cmp_klass(Register klass, Register dst, Register tmp); 351 352 // Compares the Klass pointer of two objects o1 and o2. Result is in the condition flags. 353 // Uses t1 and t2 as temporary registers. 354 void cmp_klass(Register src, Register dst, Register tmp1, Register tmp2); 355 356 void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 357 Register tmp1, Register thread_tmp); 358 void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src, 359 Register tmp1, Register tmp2); 360 361 void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, 362 Register thread_tmp = noreg, DecoratorSet decorators = 0); 363 void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, 364 Register thread_tmp = noreg, DecoratorSet decorators = 0); 365 void store_heap_oop(Address dst, Register src, Register tmp1 = noreg, 366 Register tmp2 = noreg, DecoratorSet decorators = 0); 367 368 // Used for storing NULL. All other oop constants should be 369 // stored using routines that take a jobject. 370 void store_heap_oop_null(Address dst); 371 372 void load_prototype_header(Register dst, Register src, Register tmp); 373 374 #ifdef _LP64 375 void store_klass_gap(Register dst, Register src); 376 377 // This dummy is to prevent a call to store_heap_oop from 378 // converting a zero (like NULL) into a Register by giving 379 // the compiler two choices it can't resolve 380 381 void store_heap_oop(Address dst, void* dummy); 382 383 void encode_heap_oop(Register r); 384 void decode_heap_oop(Register r); 385 void encode_heap_oop_not_null(Register r); 386 void decode_heap_oop_not_null(Register r); 387 void encode_heap_oop_not_null(Register dst, Register src); 388 void decode_heap_oop_not_null(Register dst, Register src); 389 390 void set_narrow_oop(Register dst, jobject obj); 391 void set_narrow_oop(Address dst, jobject obj); 392 void cmp_narrow_oop(Register dst, jobject obj); 393 void cmp_narrow_oop(Address dst, jobject obj); 394 395 void encode_klass_not_null(Register r, Register tmp); 396 void decode_klass_not_null(Register r, Register tmp); 397 void encode_and_move_klass_not_null(Register dst, Register src); 398 void decode_and_move_klass_not_null(Register dst, Register src); 399 void set_narrow_klass(Register dst, Klass* k); 400 void set_narrow_klass(Address dst, Klass* k); 401 void cmp_narrow_klass(Register dst, Klass* k); 402 void cmp_narrow_klass(Address dst, Klass* k); 403 404 // if heap base register is used - reinit it with the correct value 405 void reinit_heapbase(); 406 407 DEBUG_ONLY(void verify_heapbase(const char* msg);) 408 409 #endif // _LP64 410 411 // Int division/remainder for Java 412 // (as idivl, but checks for special case as described in JVM spec.) 413 // returns idivl instruction offset for implicit exception handling 414 int corrected_idivl(Register reg); 415 416 // Long division/remainder for Java 417 // (as idivq, but checks for special case as described in JVM spec.) 418 // returns idivq instruction offset for implicit exception handling 419 int corrected_idivq(Register reg); 420 421 void int3(); 422 423 // Long operation macros for a 32bit cpu 424 // Long negation for Java 425 void lneg(Register hi, Register lo); 426 427 // Long multiplication for Java 428 // (destroys contents of eax, ebx, ecx and edx) 429 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y 430 431 // Long shifts for Java 432 // (semantics as described in JVM spec.) 433 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) 434 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) 435 436 // Long compare for Java 437 // (semantics as described in JVM spec.) 438 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) 439 440 441 // misc 442 443 // Sign extension 444 void sign_extend_short(Register reg); 445 void sign_extend_byte(Register reg); 446 447 // Division by power of 2, rounding towards 0 448 void division_with_shift(Register reg, int shift_value); 449 450 #ifndef _LP64 451 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: 452 // 453 // CF (corresponds to C0) if x < y 454 // PF (corresponds to C2) if unordered 455 // ZF (corresponds to C3) if x = y 456 // 457 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 458 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) 459 void fcmp(Register tmp); 460 // Variant of the above which allows y to be further down the stack 461 // and which only pops x and y if specified. If pop_right is 462 // specified then pop_left must also be specified. 463 void fcmp(Register tmp, int index, bool pop_left, bool pop_right); 464 465 // Floating-point comparison for Java 466 // Compares the top-most stack entries on the FPU stack and stores the result in dst. 467 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 468 // (semantics as described in JVM spec.) 469 void fcmp2int(Register dst, bool unordered_is_less); 470 // Variant of the above which allows y to be further down the stack 471 // and which only pops x and y if specified. If pop_right is 472 // specified then pop_left must also be specified. 473 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); 474 475 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) 476 // tmp is a temporary register, if none is available use noreg 477 void fremr(Register tmp); 478 479 // only if +VerifyFPU 480 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); 481 #endif // !LP64 482 483 // dst = c = a * b + c 484 void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 485 void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); 486 487 void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 488 void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); 489 void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 490 void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); 491 492 493 // same as fcmp2int, but using SSE2 494 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 495 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 496 497 // branch to L if FPU flag C2 is set/not set 498 // tmp is a temporary register, if none is available use noreg 499 void jC2 (Register tmp, Label& L); 500 void jnC2(Register tmp, Label& L); 501 502 // Load float value from 'address'. If UseSSE >= 1, the value is loaded into 503 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 504 void load_float(Address src); 505 506 // Store float value to 'address'. If UseSSE >= 1, the value is stored 507 // from register xmm0. Otherwise, the value is stored from the FPU stack. 508 void store_float(Address dst); 509 510 // Load double value from 'address'. If UseSSE >= 2, the value is loaded into 511 // register xmm0. Otherwise, the value is loaded onto the FPU stack. 512 void load_double(Address src); 513 514 // Store double value to 'address'. If UseSSE >= 2, the value is stored 515 // from register xmm0. Otherwise, the value is stored from the FPU stack. 516 void store_double(Address dst); 517 518 #ifndef _LP64 519 // Pop ST (ffree & fincstp combined) 520 void fpop(); 521 522 void empty_FPU_stack(); 523 #endif // !_LP64 524 525 void push_IU_state(); 526 void pop_IU_state(); 527 528 void push_FPU_state(); 529 void pop_FPU_state(); 530 531 void push_CPU_state(); 532 void pop_CPU_state(); 533 534 // Round up to a power of two 535 void round_to(Register reg, int modulus); 536 537 // Callee saved registers handling 538 void push_callee_saved_registers(); 539 void pop_callee_saved_registers(); 540 541 // allocation 542 void eden_allocate( 543 Register thread, // Current thread 544 Register obj, // result: pointer to object after successful allocation 545 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 546 int con_size_in_bytes, // object size in bytes if known at compile time 547 Register t1, // temp register 548 Label& slow_case // continuation point if fast allocation fails 549 ); 550 void tlab_allocate( 551 Register thread, // Current thread 552 Register obj, // result: pointer to object after successful allocation 553 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 554 int con_size_in_bytes, // object size in bytes if known at compile time 555 Register t1, // temp register 556 Register t2, // temp register 557 Label& slow_case // continuation point if fast allocation fails 558 ); 559 void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp); 560 561 // interface method calling 562 void lookup_interface_method(Register recv_klass, 563 Register intf_klass, 564 RegisterOrConstant itable_index, 565 Register method_result, 566 Register scan_temp, 567 Label& no_such_interface, 568 bool return_method = true); 569 570 // virtual method calling 571 void lookup_virtual_method(Register recv_klass, 572 RegisterOrConstant vtable_index, 573 Register method_result); 574 575 // Test sub_klass against super_klass, with fast and slow paths. 576 577 // The fast path produces a tri-state answer: yes / no / maybe-slow. 578 // One of the three labels can be NULL, meaning take the fall-through. 579 // If super_check_offset is -1, the value is loaded up from super_klass. 580 // No registers are killed, except temp_reg. 581 void check_klass_subtype_fast_path(Register sub_klass, 582 Register super_klass, 583 Register temp_reg, 584 Label* L_success, 585 Label* L_failure, 586 Label* L_slow_path, 587 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); 588 589 // The rest of the type check; must be wired to a corresponding fast path. 590 // It does not repeat the fast path logic, so don't use it standalone. 591 // The temp_reg and temp2_reg can be noreg, if no temps are available. 592 // Updates the sub's secondary super cache as necessary. 593 // If set_cond_codes, condition codes will be Z on success, NZ on failure. 594 void check_klass_subtype_slow_path(Register sub_klass, 595 Register super_klass, 596 Register temp_reg, 597 Register temp2_reg, 598 Label* L_success, 599 Label* L_failure, 600 bool set_cond_codes = false); 601 602 // Simplified, combined version, good for typical uses. 603 // Falls through on failure. 604 void check_klass_subtype(Register sub_klass, 605 Register super_klass, 606 Register temp_reg, 607 Label& L_success); 608 609 void clinit_barrier(Register klass, 610 Register thread, 611 Label* L_fast_path = NULL, 612 Label* L_slow_path = NULL); 613 614 // method handles (JSR 292) 615 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); 616 617 // Debugging 618 619 // only if +VerifyOops 620 void _verify_oop(Register reg, const char* s, const char* file, int line); 621 void _verify_oop_addr(Address addr, const char* s, const char* file, int line); 622 623 void _verify_oop_checked(Register reg, const char* s, const char* file, int line) { 624 if (VerifyOops) { 625 _verify_oop(reg, s, file, line); 626 } 627 } 628 void _verify_oop_addr_checked(Address reg, const char* s, const char* file, int line) { 629 if (VerifyOops) { 630 _verify_oop_addr(reg, s, file, line); 631 } 632 } 633 634 // TODO: verify method and klass metadata (compare against vptr?) 635 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} 636 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} 637 638 #define verify_oop(reg) _verify_oop_checked(reg, "broken oop " #reg, __FILE__, __LINE__) 639 #define verify_oop_msg(reg, msg) _verify_oop_checked(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__) 640 #define verify_oop_addr(addr) _verify_oop_addr_checked(addr, "broken oop addr " #addr, __FILE__, __LINE__) 641 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) 642 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) 643 644 // Verify or restore cpu control state after JNI call 645 void restore_cpu_control_state_after_jni(); 646 647 // prints msg, dumps registers and stops execution 648 void stop(const char* msg); 649 650 // prints msg and continues 651 void warn(const char* msg); 652 653 // dumps registers and other state 654 void print_state(); 655 656 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); 657 static void debug64(char* msg, int64_t pc, int64_t regs[]); 658 static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); 659 static void print_state64(int64_t pc, int64_t regs[]); 660 661 void os_breakpoint(); 662 663 void untested() { stop("untested"); } 664 665 void unimplemented(const char* what = ""); 666 667 void should_not_reach_here() { stop("should not reach here"); } 668 669 void print_CPU_state(); 670 671 // Stack overflow checking 672 void bang_stack_with_offset(int offset) { 673 // stack grows down, caller passes positive offset 674 assert(offset > 0, "must bang with negative offset"); 675 movl(Address(rsp, (-offset)), rax); 676 } 677 678 // Writes to stack successive pages until offset reached to check for 679 // stack overflow + shadow pages. Also, clobbers tmp 680 void bang_stack_size(Register size, Register tmp); 681 682 // Check for reserved stack access in method being exited (for JIT) 683 void reserved_stack_check(); 684 685 void safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod); 686 687 void verify_tlab(); 688 689 // Biased locking support 690 // lock_reg and obj_reg must be loaded up with the appropriate values. 691 // swap_reg must be rax, and is killed. 692 // tmp_reg is optional. If it is supplied (i.e., != noreg) it will 693 // be killed; if not supplied, push/pop will be used internally to 694 // allocate a temporary (inefficient, avoid if possible). 695 // Optional slow case is for implementations (interpreter and C1) which branch to 696 // slow case directly. Leaves condition codes set for C2's Fast_Lock node. 697 void biased_locking_enter(Register lock_reg, Register obj_reg, 698 Register swap_reg, Register tmp_reg, 699 Register tmp_reg2, bool swap_reg_contains_mark, 700 Label& done, Label* slow_case = NULL, 701 BiasedLockingCounters* counters = NULL); 702 void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); 703 704 Condition negate_condition(Condition cond); 705 706 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit 707 // operands. In general the names are modified to avoid hiding the instruction in Assembler 708 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers 709 // here in MacroAssembler. The major exception to this rule is call 710 711 // Arithmetics 712 713 714 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } 715 void addptr(Address dst, Register src); 716 717 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } 718 void addptr(Register dst, int32_t src); 719 void addptr(Register dst, Register src); 720 void addptr(Register dst, RegisterOrConstant src) { 721 if (src.is_constant()) addptr(dst, (int) src.as_constant()); 722 else addptr(dst, src.as_register()); 723 } 724 725 void andptr(Register dst, int32_t src); 726 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } 727 728 void cmp8(AddressLiteral src1, int imm); 729 730 // renamed to drag out the casting of address to int32_t/intptr_t 731 void cmp32(Register src1, int32_t imm); 732 733 void cmp32(AddressLiteral src1, int32_t imm); 734 // compare reg - mem, or reg - &mem 735 void cmp32(Register src1, AddressLiteral src2); 736 737 void cmp32(Register src1, Address src2); 738 739 #ifndef _LP64 740 void cmpklass(Address dst, Metadata* obj); 741 void cmpklass(Register dst, Metadata* obj); 742 void cmpoop(Address dst, jobject obj); 743 #endif // _LP64 744 745 void cmpoop(Register src1, Register src2); 746 void cmpoop(Register src1, Address src2); 747 void cmpoop(Register dst, jobject obj); 748 749 // NOTE src2 must be the lval. This is NOT an mem-mem compare 750 void cmpptr(Address src1, AddressLiteral src2); 751 752 void cmpptr(Register src1, AddressLiteral src2); 753 754 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 755 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 756 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 757 758 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 759 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 760 761 // cmp64 to avoild hiding cmpq 762 void cmp64(Register src1, AddressLiteral src); 763 764 void cmpxchgptr(Register reg, Address adr); 765 766 void locked_cmpxchgptr(Register reg, AddressLiteral adr); 767 768 769 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } 770 void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); } 771 772 773 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } 774 775 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } 776 777 void shlptr(Register dst, int32_t shift); 778 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } 779 780 void shrptr(Register dst, int32_t shift); 781 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } 782 783 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } 784 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } 785 786 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 787 788 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 789 void subptr(Register dst, int32_t src); 790 // Force generation of a 4 byte immediate value even if it fits into 8bit 791 void subptr_imm32(Register dst, int32_t src); 792 void subptr(Register dst, Register src); 793 void subptr(Register dst, RegisterOrConstant src) { 794 if (src.is_constant()) subptr(dst, (int) src.as_constant()); 795 else subptr(dst, src.as_register()); 796 } 797 798 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 799 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 800 801 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 802 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 803 804 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } 805 806 807 808 // Helper functions for statistics gathering. 809 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. 810 void cond_inc32(Condition cond, AddressLiteral counter_addr); 811 // Unconditional atomic increment. 812 void atomic_incl(Address counter_addr); 813 void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1); 814 #ifdef _LP64 815 void atomic_incq(Address counter_addr); 816 void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1); 817 #endif 818 void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; } 819 void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; } 820 821 void lea(Register dst, AddressLiteral adr); 822 void lea(Address dst, AddressLiteral adr); 823 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } 824 825 void leal32(Register dst, Address src) { leal(dst, src); } 826 827 // Import other testl() methods from the parent class or else 828 // they will be hidden by the following overriding declaration. 829 using Assembler::testl; 830 void testl(Register dst, AddressLiteral src); 831 832 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 833 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 834 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 835 void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } 836 837 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 838 void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); } 839 void testptr(Register src1, Register src2); 840 841 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 842 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 843 844 // Calls 845 846 void call(Label& L, relocInfo::relocType rtype); 847 void call(Register entry); 848 void call(Address addr) { Assembler::call(addr); } 849 850 // NOTE: this call transfers to the effective address of entry NOT 851 // the address contained by entry. This is because this is more natural 852 // for jumps/calls. 853 void call(AddressLiteral entry); 854 855 // Emit the CompiledIC call idiom 856 void ic_call(address entry, jint method_index = 0); 857 858 // Jumps 859 860 // NOTE: these jumps tranfer to the effective address of dst NOT 861 // the address contained by dst. This is because this is more natural 862 // for jumps/calls. 863 void jump(AddressLiteral dst); 864 void jump_cc(Condition cc, AddressLiteral dst); 865 866 // 32bit can do a case table jump in one instruction but we no longer allow the base 867 // to be installed in the Address class. This jump will tranfers to the address 868 // contained in the location described by entry (not the address of entry) 869 void jump(ArrayAddress entry); 870 871 // Floating 872 873 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } 874 void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 875 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } 876 877 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } 878 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } 879 void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 880 881 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } 882 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } 883 void comiss(XMMRegister dst, AddressLiteral src); 884 885 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } 886 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 887 void comisd(XMMRegister dst, AddressLiteral src); 888 889 #ifndef _LP64 890 void fadd_s(Address src) { Assembler::fadd_s(src); } 891 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 892 893 void fldcw(Address src) { Assembler::fldcw(src); } 894 void fldcw(AddressLiteral src); 895 896 void fld_s(int index) { Assembler::fld_s(index); } 897 void fld_s(Address src) { Assembler::fld_s(src); } 898 void fld_s(AddressLiteral src); 899 900 void fld_d(Address src) { Assembler::fld_d(src); } 901 void fld_d(AddressLiteral src); 902 903 void fmul_s(Address src) { Assembler::fmul_s(src); } 904 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 905 #endif // _LP64 906 907 void fld_x(Address src) { Assembler::fld_x(src); } 908 void fld_x(AddressLiteral src); 909 910 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 911 void ldmxcsr(AddressLiteral src); 912 913 #ifdef _LP64 914 private: 915 void sha256_AVX2_one_round_compute( 916 Register reg_old_h, 917 Register reg_a, 918 Register reg_b, 919 Register reg_c, 920 Register reg_d, 921 Register reg_e, 922 Register reg_f, 923 Register reg_g, 924 Register reg_h, 925 int iter); 926 void sha256_AVX2_four_rounds_compute_first(int start); 927 void sha256_AVX2_four_rounds_compute_last(int start); 928 void sha256_AVX2_one_round_and_sched( 929 XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */ 930 XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */ 931 XMMRegister xmm_2, /* ymm6 */ 932 XMMRegister xmm_3, /* ymm7 */ 933 Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */ 934 Register reg_b, /* ebx */ /* full cycle is 8 iterations */ 935 Register reg_c, /* edi */ 936 Register reg_d, /* esi */ 937 Register reg_e, /* r8d */ 938 Register reg_f, /* r9d */ 939 Register reg_g, /* r10d */ 940 Register reg_h, /* r11d */ 941 int iter); 942 943 void addm(int disp, Register r1, Register r2); 944 void gfmul(XMMRegister tmp0, XMMRegister t); 945 void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0, 946 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3); 947 void generateHtbl_one_block(Register htbl); 948 void generateHtbl_eight_blocks(Register htbl); 949 public: 950 void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 951 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 952 Register buf, Register state, Register ofs, Register limit, Register rsp, 953 bool multi_block, XMMRegister shuf_mask); 954 void avx_ghash(Register state, Register htbl, Register data, Register blocks); 955 #endif 956 957 #ifdef _LP64 958 private: 959 void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d, 960 Register e, Register f, Register g, Register h, int iteration); 961 962 void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 963 Register a, Register b, Register c, Register d, Register e, Register f, 964 Register g, Register h, int iteration); 965 966 void addmq(int disp, Register r1, Register r2); 967 public: 968 void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 969 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 970 Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block, 971 XMMRegister shuf_mask); 972 private: 973 void roundEnc(XMMRegister key, int rnum); 974 void lastroundEnc(XMMRegister key, int rnum); 975 void roundDec(XMMRegister key, int rnum); 976 void lastroundDec(XMMRegister key, int rnum); 977 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask); 978 void ev_add128(XMMRegister xmmdst, XMMRegister xmmsrc1, XMMRegister xmmsrc2, 979 int vector_len, KRegister ktmp, Register rscratch = noreg); 980 981 public: 982 void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len); 983 void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len); 984 void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter, 985 Register len_reg, Register used, Register used_addr, Register saved_encCounter_start); 986 987 #endif 988 989 void fast_md5(Register buf, Address state, Address ofs, Address limit, 990 bool multi_block); 991 992 void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0, 993 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask, 994 Register buf, Register state, Register ofs, Register limit, Register rsp, 995 bool multi_block); 996 997 #ifdef _LP64 998 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 999 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1000 Register buf, Register state, Register ofs, Register limit, Register rsp, 1001 bool multi_block, XMMRegister shuf_mask); 1002 #else 1003 void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0, 1004 XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4, 1005 Register buf, Register state, Register ofs, Register limit, Register rsp, 1006 bool multi_block); 1007 #endif 1008 1009 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1010 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1011 Register rax, Register rcx, Register rdx, Register tmp); 1012 1013 #ifdef _LP64 1014 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1015 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1016 Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2); 1017 1018 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1019 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1020 Register rax, Register rcx, Register rdx, Register r11); 1021 1022 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, 1023 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, 1024 Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4); 1025 1026 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1027 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1028 Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2, 1029 Register tmp3, Register tmp4); 1030 1031 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1032 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1033 Register rax, Register rcx, Register rdx, Register tmp1, 1034 Register tmp2, Register tmp3, Register tmp4); 1035 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1036 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1037 Register rax, Register rcx, Register rdx, Register tmp1, 1038 Register tmp2, Register tmp3, Register tmp4); 1039 #else 1040 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1041 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1042 Register rax, Register rcx, Register rdx, Register tmp1); 1043 1044 void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1045 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1046 Register rax, Register rcx, Register rdx, Register tmp); 1047 1048 void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, 1049 XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx, 1050 Register rdx, Register tmp); 1051 1052 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1053 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1054 Register rax, Register rbx, Register rdx); 1055 1056 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1057 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1058 Register rax, Register rcx, Register rdx, Register tmp); 1059 1060 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1061 Register edx, Register ebx, Register esi, Register edi, 1062 Register ebp, Register esp); 1063 1064 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx, 1065 Register esi, Register edi, Register ebp, Register esp); 1066 1067 void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx, 1068 Register edx, Register ebx, Register esi, Register edi, 1069 Register ebp, Register esp); 1070 1071 void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, 1072 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, 1073 Register rax, Register rcx, Register rdx, Register tmp); 1074 #endif 1075 1076 private: 1077 1078 // these are private because users should be doing movflt/movdbl 1079 1080 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } 1081 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } 1082 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 1083 void movss(XMMRegister dst, AddressLiteral src); 1084 1085 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 1086 void movlpd(XMMRegister dst, AddressLiteral src); 1087 1088 public: 1089 1090 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 1091 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 1092 void addsd(XMMRegister dst, AddressLiteral src); 1093 1094 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 1095 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 1096 void addss(XMMRegister dst, AddressLiteral src); 1097 1098 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } 1099 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } 1100 void addpd(XMMRegister dst, AddressLiteral src); 1101 1102 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 1103 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 1104 void divsd(XMMRegister dst, AddressLiteral src); 1105 1106 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 1107 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 1108 void divss(XMMRegister dst, AddressLiteral src); 1109 1110 // Move Unaligned Double Quadword 1111 void movdqu(Address dst, XMMRegister src); 1112 void movdqu(XMMRegister dst, Address src); 1113 void movdqu(XMMRegister dst, XMMRegister src); 1114 void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1); 1115 1116 void kmovwl(KRegister dst, Register src) { Assembler::kmovwl(dst, src); } 1117 void kmovwl(Register dst, KRegister src) { Assembler::kmovwl(dst, src); } 1118 void kmovwl(KRegister dst, Address src) { Assembler::kmovwl(dst, src); } 1119 void kmovwl(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 1120 void kmovwl(Address dst, KRegister src) { Assembler::kmovwl(dst, src); } 1121 void kmovwl(KRegister dst, KRegister src) { Assembler::kmovwl(dst, src); } 1122 1123 void kmovql(KRegister dst, KRegister src) { Assembler::kmovql(dst, src); } 1124 void kmovql(KRegister dst, Register src) { Assembler::kmovql(dst, src); } 1125 void kmovql(Register dst, KRegister src) { Assembler::kmovql(dst, src); } 1126 void kmovql(KRegister dst, Address src) { Assembler::kmovql(dst, src); } 1127 void kmovql(Address dst, KRegister src) { Assembler::kmovql(dst, src); } 1128 void kmovql(KRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 1129 1130 // Safe move operation, lowers down to 16bit moves for targets supporting 1131 // AVX512F feature and 64bit moves for targets supporting AVX512BW feature. 1132 void kmov(Address dst, KRegister src); 1133 void kmov(KRegister dst, Address src); 1134 void kmov(KRegister dst, KRegister src); 1135 void kmov(Register dst, KRegister src); 1136 void kmov(KRegister dst, Register src); 1137 1138 // AVX Unaligned forms 1139 void vmovdqu(Address dst, XMMRegister src); 1140 void vmovdqu(XMMRegister dst, Address src); 1141 void vmovdqu(XMMRegister dst, XMMRegister src); 1142 void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 1143 1144 // AVX512 Unaligned 1145 void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, int vector_len); 1146 void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, int vector_len); 1147 1148 void evmovdqub(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); } 1149 void evmovdqub(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); } 1150 void evmovdqub(XMMRegister dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, src, merge, vector_len); } 1151 void evmovdqub(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1152 void evmovdqub(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqub(dst, mask, src, merge, vector_len); } 1153 void evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); 1154 1155 void evmovdquw(Address dst, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); } 1156 void evmovdquw(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1157 void evmovdquw(XMMRegister dst, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, src, merge, vector_len); } 1158 void evmovdquw(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquw(dst, mask, src, merge, vector_len); } 1159 void evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); 1160 1161 void evmovdqul(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1162 void evmovdqul(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdqul(dst, src, vector_len); } 1163 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { 1164 if (dst->encoding() == src->encoding()) return; 1165 Assembler::evmovdqul(dst, src, vector_len); 1166 } 1167 void evmovdqul(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1168 void evmovdqul(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdqul(dst, mask, src, merge, vector_len); } 1169 void evmovdqul(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1170 if (dst->encoding() == src->encoding() && mask == k0) return; 1171 Assembler::evmovdqul(dst, mask, src, merge, vector_len); 1172 } 1173 void evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); 1174 1175 void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1176 void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); } 1177 void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch); 1178 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { 1179 if (dst->encoding() == src->encoding()) return; 1180 Assembler::evmovdquq(dst, src, vector_len); 1181 } 1182 void evmovdquq(Address dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1183 void evmovdquq(XMMRegister dst, KRegister mask, Address src, bool merge, int vector_len) { Assembler::evmovdquq(dst, mask, src, merge, vector_len); } 1184 void evmovdquq(XMMRegister dst, KRegister mask, XMMRegister src, bool merge, int vector_len) { 1185 if (dst->encoding() == src->encoding() && mask == k0) return; 1186 Assembler::evmovdquq(dst, mask, src, merge, vector_len); 1187 } 1188 void evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); 1189 1190 // Move Aligned Double Quadword 1191 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 1192 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 1193 void movdqa(XMMRegister dst, AddressLiteral src); 1194 1195 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 1196 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 1197 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 1198 void movsd(XMMRegister dst, AddressLiteral src); 1199 1200 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } 1201 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } 1202 void mulpd(XMMRegister dst, AddressLiteral src); 1203 1204 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 1205 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 1206 void mulsd(XMMRegister dst, AddressLiteral src); 1207 1208 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 1209 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 1210 void mulss(XMMRegister dst, AddressLiteral src); 1211 1212 // Carry-Less Multiplication Quadword 1213 void pclmulldq(XMMRegister dst, XMMRegister src) { 1214 // 0x00 - multiply lower 64 bits [0:63] 1215 Assembler::pclmulqdq(dst, src, 0x00); 1216 } 1217 void pclmulhdq(XMMRegister dst, XMMRegister src) { 1218 // 0x11 - multiply upper 64 bits [64:127] 1219 Assembler::pclmulqdq(dst, src, 0x11); 1220 } 1221 1222 void pcmpeqb(XMMRegister dst, XMMRegister src); 1223 void pcmpeqw(XMMRegister dst, XMMRegister src); 1224 1225 void pcmpestri(XMMRegister dst, Address src, int imm8); 1226 void pcmpestri(XMMRegister dst, XMMRegister src, int imm8); 1227 1228 void pmovzxbw(XMMRegister dst, XMMRegister src); 1229 void pmovzxbw(XMMRegister dst, Address src); 1230 1231 void pmovmskb(Register dst, XMMRegister src); 1232 1233 void ptest(XMMRegister dst, XMMRegister src); 1234 1235 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } 1236 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } 1237 void sqrtsd(XMMRegister dst, AddressLiteral src); 1238 1239 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1240 void roundsd(XMMRegister dst, Address src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); } 1241 void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg); 1242 1243 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 1244 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 1245 void sqrtss(XMMRegister dst, AddressLiteral src); 1246 1247 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 1248 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 1249 void subsd(XMMRegister dst, AddressLiteral src); 1250 1251 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 1252 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 1253 void subss(XMMRegister dst, AddressLiteral src); 1254 1255 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 1256 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 1257 void ucomiss(XMMRegister dst, AddressLiteral src); 1258 1259 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 1260 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 1261 void ucomisd(XMMRegister dst, AddressLiteral src); 1262 1263 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 1264 void xorpd(XMMRegister dst, XMMRegister src); 1265 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 1266 void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 1267 1268 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 1269 void xorps(XMMRegister dst, XMMRegister src); 1270 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 1271 void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); 1272 1273 // Shuffle Bytes 1274 void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); } 1275 void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); } 1276 void pshufb(XMMRegister dst, AddressLiteral src); 1277 // AVX 3-operands instructions 1278 1279 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 1280 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 1281 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1282 1283 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 1284 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 1285 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1286 1287 void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len); 1288 void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len); 1289 1290 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1291 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1292 void vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch); 1293 1294 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1295 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1296 1297 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1298 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); } 1299 void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch); 1300 1301 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1302 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); } 1303 void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1304 1305 void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len); 1306 void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); } 1307 1308 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1309 1310 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1311 void evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg); 1312 1313 // Vector compares 1314 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 1315 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpd(kdst, mask, nds, src, comparison, is_signed, vector_len); } 1316 void evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 1317 int comparison, bool is_signed, int vector_len, Register scratch_reg); 1318 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 1319 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpq(kdst, mask, nds, src, comparison, is_signed, vector_len); } 1320 void evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 1321 int comparison, bool is_signed, int vector_len, Register scratch_reg); 1322 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 1323 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpb(kdst, mask, nds, src, comparison, is_signed, vector_len); } 1324 void evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 1325 int comparison, bool is_signed, int vector_len, Register scratch_reg); 1326 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, 1327 int comparison, bool is_signed, int vector_len) { Assembler::evpcmpw(kdst, mask, nds, src, comparison, is_signed, vector_len); } 1328 void evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 1329 int comparison, bool is_signed, int vector_len, Register scratch_reg); 1330 1331 1332 // Emit comparison instruction for the specified comparison predicate. 1333 void vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, ComparisonPredicate cond, Width width, int vector_len, Register scratch_reg); 1334 void vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len); 1335 1336 void vpmovzxbw(XMMRegister dst, Address src, int vector_len); 1337 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); } 1338 1339 void vpmovmskb(Register dst, XMMRegister src, int vector_len = Assembler::AVX_256bit); 1340 1341 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1342 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1343 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1344 Assembler::vpmulld(dst, nds, src, vector_len); 1345 }; 1346 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1347 Assembler::vpmulld(dst, nds, src, vector_len); 1348 } 1349 void vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg); 1350 1351 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1352 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1353 1354 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); 1355 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len); 1356 1357 void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1358 void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1359 1360 void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1361 void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1362 1363 void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1364 void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1365 1366 void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len); 1367 void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len); 1368 1369 void vptest(XMMRegister dst, XMMRegister src); 1370 void vptest(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vptest(dst, src, vector_len); } 1371 1372 void punpcklbw(XMMRegister dst, XMMRegister src); 1373 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1374 1375 void pshufd(XMMRegister dst, Address src, int mode); 1376 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1377 1378 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1379 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1380 1381 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1382 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1383 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1384 1385 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1386 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1387 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1388 1389 void evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register scratch_reg); 1390 1391 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1392 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1393 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1394 1395 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1396 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1397 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1398 1399 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1400 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1401 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1402 1403 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1404 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1405 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1406 1407 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1408 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1409 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1410 1411 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1412 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1413 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1414 1415 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1416 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1417 1418 // AVX Vector instructions 1419 1420 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1421 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1422 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1423 1424 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1425 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1426 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1427 1428 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1429 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1430 Assembler::vpxor(dst, nds, src, vector_len); 1431 else 1432 Assembler::vxorpd(dst, nds, src, vector_len); 1433 } 1434 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1435 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1436 Assembler::vpxor(dst, nds, src, vector_len); 1437 else 1438 Assembler::vxorpd(dst, nds, src, vector_len); 1439 } 1440 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1441 1442 // Simple version for AVX2 256bit vectors 1443 void vpxor(XMMRegister dst, XMMRegister src) { 1444 assert(UseAVX >= 2, "Should be at least AVX2"); 1445 Assembler::vpxor(dst, dst, src, AVX_256bit); 1446 } 1447 void vpxor(XMMRegister dst, Address src) { 1448 assert(UseAVX >= 2, "Should be at least AVX2"); 1449 Assembler::vpxor(dst, dst, src, AVX_256bit); 1450 } 1451 1452 void vpermd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpermd(dst, nds, src, vector_len); } 1453 void vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg); 1454 1455 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1456 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1457 Assembler::vinserti32x4(dst, nds, src, imm8); 1458 } else if (UseAVX > 1) { 1459 // vinserti128 is available only in AVX2 1460 Assembler::vinserti128(dst, nds, src, imm8); 1461 } else { 1462 Assembler::vinsertf128(dst, nds, src, imm8); 1463 } 1464 } 1465 1466 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1467 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1468 Assembler::vinserti32x4(dst, nds, src, imm8); 1469 } else if (UseAVX > 1) { 1470 // vinserti128 is available only in AVX2 1471 Assembler::vinserti128(dst, nds, src, imm8); 1472 } else { 1473 Assembler::vinsertf128(dst, nds, src, imm8); 1474 } 1475 } 1476 1477 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1478 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1479 Assembler::vextracti32x4(dst, src, imm8); 1480 } else if (UseAVX > 1) { 1481 // vextracti128 is available only in AVX2 1482 Assembler::vextracti128(dst, src, imm8); 1483 } else { 1484 Assembler::vextractf128(dst, src, imm8); 1485 } 1486 } 1487 1488 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1489 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1490 Assembler::vextracti32x4(dst, src, imm8); 1491 } else if (UseAVX > 1) { 1492 // vextracti128 is available only in AVX2 1493 Assembler::vextracti128(dst, src, imm8); 1494 } else { 1495 Assembler::vextractf128(dst, src, imm8); 1496 } 1497 } 1498 1499 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1500 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1501 vinserti128(dst, dst, src, 1); 1502 } 1503 void vinserti128_high(XMMRegister dst, Address src) { 1504 vinserti128(dst, dst, src, 1); 1505 } 1506 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1507 vextracti128(dst, src, 1); 1508 } 1509 void vextracti128_high(Address dst, XMMRegister src) { 1510 vextracti128(dst, src, 1); 1511 } 1512 1513 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1514 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1515 Assembler::vinsertf32x4(dst, dst, src, 1); 1516 } else { 1517 Assembler::vinsertf128(dst, dst, src, 1); 1518 } 1519 } 1520 1521 void vinsertf128_high(XMMRegister dst, Address src) { 1522 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1523 Assembler::vinsertf32x4(dst, dst, src, 1); 1524 } else { 1525 Assembler::vinsertf128(dst, dst, src, 1); 1526 } 1527 } 1528 1529 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1530 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1531 Assembler::vextractf32x4(dst, src, 1); 1532 } else { 1533 Assembler::vextractf128(dst, src, 1); 1534 } 1535 } 1536 1537 void vextractf128_high(Address dst, XMMRegister src) { 1538 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1539 Assembler::vextractf32x4(dst, src, 1); 1540 } else { 1541 Assembler::vextractf128(dst, src, 1); 1542 } 1543 } 1544 1545 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1546 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1547 Assembler::vinserti64x4(dst, dst, src, 1); 1548 } 1549 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1550 Assembler::vinsertf64x4(dst, dst, src, 1); 1551 } 1552 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1553 Assembler::vextracti64x4(dst, src, 1); 1554 } 1555 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1556 Assembler::vextractf64x4(dst, src, 1); 1557 } 1558 void vextractf64x4_high(Address dst, XMMRegister src) { 1559 Assembler::vextractf64x4(dst, src, 1); 1560 } 1561 void vinsertf64x4_high(XMMRegister dst, Address src) { 1562 Assembler::vinsertf64x4(dst, dst, src, 1); 1563 } 1564 1565 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1566 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1567 vinserti128(dst, dst, src, 0); 1568 } 1569 void vinserti128_low(XMMRegister dst, Address src) { 1570 vinserti128(dst, dst, src, 0); 1571 } 1572 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1573 vextracti128(dst, src, 0); 1574 } 1575 void vextracti128_low(Address dst, XMMRegister src) { 1576 vextracti128(dst, src, 0); 1577 } 1578 1579 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1580 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1581 Assembler::vinsertf32x4(dst, dst, src, 0); 1582 } else { 1583 Assembler::vinsertf128(dst, dst, src, 0); 1584 } 1585 } 1586 1587 void vinsertf128_low(XMMRegister dst, Address src) { 1588 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1589 Assembler::vinsertf32x4(dst, dst, src, 0); 1590 } else { 1591 Assembler::vinsertf128(dst, dst, src, 0); 1592 } 1593 } 1594 1595 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1596 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1597 Assembler::vextractf32x4(dst, src, 0); 1598 } else { 1599 Assembler::vextractf128(dst, src, 0); 1600 } 1601 } 1602 1603 void vextractf128_low(Address dst, XMMRegister src) { 1604 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1605 Assembler::vextractf32x4(dst, src, 0); 1606 } else { 1607 Assembler::vextractf128(dst, src, 0); 1608 } 1609 } 1610 1611 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1612 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1613 Assembler::vinserti64x4(dst, dst, src, 0); 1614 } 1615 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1616 Assembler::vinsertf64x4(dst, dst, src, 0); 1617 } 1618 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1619 Assembler::vextracti64x4(dst, src, 0); 1620 } 1621 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1622 Assembler::vextractf64x4(dst, src, 0); 1623 } 1624 void vextractf64x4_low(Address dst, XMMRegister src) { 1625 Assembler::vextractf64x4(dst, src, 0); 1626 } 1627 void vinsertf64x4_low(XMMRegister dst, Address src) { 1628 Assembler::vinsertf64x4(dst, dst, src, 0); 1629 } 1630 1631 // Carry-Less Multiplication Quadword 1632 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1633 // 0x00 - multiply lower 64 bits [0:63] 1634 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1635 } 1636 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1637 // 0x11 - multiply upper 64 bits [64:127] 1638 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1639 } 1640 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1641 // 0x10 - multiply nds[0:63] and src[64:127] 1642 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1643 } 1644 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1645 //0x01 - multiply nds[64:127] and src[0:63] 1646 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1647 } 1648 1649 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1650 // 0x00 - multiply lower 64 bits [0:63] 1651 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1652 } 1653 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1654 // 0x11 - multiply upper 64 bits [64:127] 1655 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1656 } 1657 1658 // Data 1659 1660 void cmov32( Condition cc, Register dst, Address src); 1661 void cmov32( Condition cc, Register dst, Register src); 1662 1663 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1664 1665 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1666 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1667 1668 void movoop(Register dst, jobject obj); 1669 void movoop(Address dst, jobject obj); 1670 1671 void mov_metadata(Register dst, Metadata* obj); 1672 void mov_metadata(Address dst, Metadata* obj); 1673 1674 void movptr(ArrayAddress dst, Register src); 1675 // can this do an lea? 1676 void movptr(Register dst, ArrayAddress src); 1677 1678 void movptr(Register dst, Address src); 1679 1680 #ifdef _LP64 1681 void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1); 1682 #else 1683 void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit 1684 #endif 1685 1686 void movptr(Register dst, intptr_t src); 1687 void movptr(Register dst, Register src); 1688 void movptr(Address dst, intptr_t src); 1689 1690 void movptr(Address dst, Register src); 1691 1692 void movptr(Register dst, RegisterOrConstant src) { 1693 if (src.is_constant()) movptr(dst, src.as_constant()); 1694 else movptr(dst, src.as_register()); 1695 } 1696 1697 #ifdef _LP64 1698 // Generally the next two are only used for moving NULL 1699 // Although there are situations in initializing the mark word where 1700 // they could be used. They are dangerous. 1701 1702 // They only exist on LP64 so that int32_t and intptr_t are not the same 1703 // and we have ambiguous declarations. 1704 1705 void movptr(Address dst, int32_t imm32); 1706 void movptr(Register dst, int32_t imm32); 1707 #endif // _LP64 1708 1709 // to avoid hiding movl 1710 void mov32(AddressLiteral dst, Register src); 1711 void mov32(Register dst, AddressLiteral src); 1712 1713 // to avoid hiding movb 1714 void movbyte(ArrayAddress dst, int src); 1715 1716 // Import other mov() methods from the parent class or else 1717 // they will be hidden by the following overriding declaration. 1718 using Assembler::movdl; 1719 using Assembler::movq; 1720 void movdl(XMMRegister dst, AddressLiteral src); 1721 void movq(XMMRegister dst, AddressLiteral src); 1722 1723 // Can push value or effective address 1724 void pushptr(AddressLiteral src); 1725 1726 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 1727 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 1728 1729 void pushoop(jobject obj); 1730 void pushklass(Metadata* obj); 1731 1732 // sign extend as need a l to ptr sized element 1733 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 1734 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 1735 1736 1737 public: 1738 // C2 compiled method's prolog code. 1739 void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub); 1740 1741 // clear memory of size 'cnt' qwords, starting at 'base'; 1742 // if 'is_large' is set, do not try to produce short loop 1743 void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg); 1744 1745 // clear memory initialization sequence for constant size; 1746 void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1747 1748 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers 1749 void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg); 1750 1751 // Fill primitive arrays 1752 void generate_fill(BasicType t, bool aligned, 1753 Register to, Register value, Register count, 1754 Register rtmp, XMMRegister xtmp); 1755 1756 void encode_iso_array(Register src, Register dst, Register len, 1757 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1758 XMMRegister tmp4, Register tmp5, Register result, bool ascii); 1759 1760 #ifdef _LP64 1761 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 1762 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1763 Register y, Register y_idx, Register z, 1764 Register carry, Register product, 1765 Register idx, Register kdx); 1766 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 1767 Register yz_idx, Register idx, 1768 Register carry, Register product, int offset); 1769 void multiply_128_x_128_bmi2_loop(Register y, Register z, 1770 Register carry, Register carry2, 1771 Register idx, Register jdx, 1772 Register yz_idx1, Register yz_idx2, 1773 Register tmp, Register tmp3, Register tmp4); 1774 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 1775 Register yz_idx, Register idx, Register jdx, 1776 Register carry, Register product, 1777 Register carry2); 1778 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, 1779 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 1780 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 1781 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1782 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 1783 Register tmp2); 1784 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 1785 Register rdxReg, Register raxReg); 1786 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 1787 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1788 Register tmp3, Register tmp4); 1789 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1790 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1791 1792 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 1793 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1794 Register raxReg); 1795 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 1796 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1797 Register raxReg); 1798 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 1799 Register result, Register tmp1, Register tmp2, 1800 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 1801 #endif 1802 1803 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 1804 void update_byte_crc32(Register crc, Register val, Register table); 1805 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 1806 1807 1808 #ifdef _LP64 1809 void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2); 1810 void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos, 1811 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 1812 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup); 1813 void updateBytesAdler32(Register adler32, Register buf, Register length, XMMRegister shuf0, XMMRegister shuf1, ExternalAddress scale); 1814 #endif // _LP64 1815 1816 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 1817 // Note on a naming convention: 1818 // Prefix w = register only used on a Westmere+ architecture 1819 // Prefix n = register only used on a Nehalem architecture 1820 #ifdef _LP64 1821 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1822 Register tmp1, Register tmp2, Register tmp3); 1823 #else 1824 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1825 Register tmp1, Register tmp2, Register tmp3, 1826 XMMRegister xtmp1, XMMRegister xtmp2); 1827 #endif 1828 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 1829 Register in_out, 1830 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 1831 XMMRegister w_xtmp2, 1832 Register tmp1, 1833 Register n_tmp2, Register n_tmp3); 1834 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 1835 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1836 Register tmp1, Register tmp2, 1837 Register n_tmp3); 1838 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 1839 Register in_out1, Register in_out2, Register in_out3, 1840 Register tmp1, Register tmp2, Register tmp3, 1841 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1842 Register tmp4, Register tmp5, 1843 Register n_tmp6); 1844 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 1845 Register tmp1, Register tmp2, Register tmp3, 1846 Register tmp4, Register tmp5, Register tmp6, 1847 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1848 bool is_pclmulqdq_supported); 1849 // Fold 128-bit data chunk 1850 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 1851 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 1852 #ifdef _LP64 1853 // Fold 512-bit data chunk 1854 void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset); 1855 #endif // _LP64 1856 // Fold 8-bit data 1857 void fold_8bit_crc32(Register crc, Register table, Register tmp); 1858 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 1859 1860 // Compress char[] array to byte[]. 1861 void char_array_compress(Register src, Register dst, Register len, 1862 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1863 XMMRegister tmp4, Register tmp5, Register result, 1864 KRegister mask1 = knoreg, KRegister mask2 = knoreg); 1865 1866 // Inflate byte[] array to char[]. 1867 void byte_array_inflate(Register src, Register dst, Register len, 1868 XMMRegister tmp1, Register tmp2, KRegister mask = knoreg); 1869 1870 void fill64_masked_avx(uint shift, Register dst, int disp, 1871 XMMRegister xmm, KRegister mask, Register length, 1872 Register temp, bool use64byteVector = false); 1873 1874 void fill32_masked_avx(uint shift, Register dst, int disp, 1875 XMMRegister xmm, KRegister mask, Register length, 1876 Register temp); 1877 1878 void fill32(Address dst, XMMRegister xmm); 1879 1880 void fill32_avx(Register dst, int disp, XMMRegister xmm); 1881 1882 void fill64(Address dst, XMMRegister xmm, bool use64byteVector = false); 1883 1884 void fill64_avx(Register dst, int dis, XMMRegister xmm, bool use64byteVector = false); 1885 1886 #ifdef _LP64 1887 void convert_f2i(Register dst, XMMRegister src); 1888 void convert_d2i(Register dst, XMMRegister src); 1889 void convert_f2l(Register dst, XMMRegister src); 1890 void convert_d2l(Register dst, XMMRegister src); 1891 1892 void cache_wb(Address line); 1893 void cache_wbsync(bool is_pre); 1894 1895 #if COMPILER2_OR_JVMCI 1896 void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from, 1897 Register to, Register count, int shift, 1898 Register index, Register temp, 1899 bool use64byteVector, Label& L_entry, Label& L_exit); 1900 1901 void arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegister mask, Register from, 1902 Register to, Register start_index, Register end_index, 1903 Register count, int shift, Register temp, 1904 bool use64byteVector, Label& L_entry, Label& L_exit); 1905 1906 void copy64_masked_avx(Register dst, Register src, XMMRegister xmm, 1907 KRegister mask, Register length, Register index, 1908 Register temp, int shift = Address::times_1, int offset = 0, 1909 bool use64byteVector = false); 1910 1911 void copy32_masked_avx(Register dst, Register src, XMMRegister xmm, 1912 KRegister mask, Register length, Register index, 1913 Register temp, int shift = Address::times_1, int offset = 0); 1914 1915 void copy32_avx(Register dst, Register src, Register index, XMMRegister xmm, 1916 int shift = Address::times_1, int offset = 0); 1917 1918 void copy64_avx(Register dst, Register src, Register index, XMMRegister xmm, 1919 bool conjoint, int shift = Address::times_1, int offset = 0, 1920 bool use64byteVector = false); 1921 #endif // COMPILER2_OR_JVMCI 1922 1923 #endif // _LP64 1924 1925 void vallones(XMMRegister dst, int vector_len); 1926 1927 void fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow); 1928 void fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow); 1929 }; 1930 1931 /** 1932 * class SkipIfEqual: 1933 * 1934 * Instantiating this class will result in assembly code being output that will 1935 * jump around any code emitted between the creation of the instance and it's 1936 * automatic destruction at the end of a scope block, depending on the value of 1937 * the flag passed to the constructor, which will be checked at run-time. 1938 */ 1939 class SkipIfEqual { 1940 private: 1941 MacroAssembler* _masm; 1942 Label _label; 1943 1944 public: 1945 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); 1946 ~SkipIfEqual(); 1947 }; 1948 1949 #endif // CPU_X86_MACROASSEMBLER_X86_HPP --- EOF ---