1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "asm/assembler.hpp" 26 #include "asm/assembler.inline.hpp" 27 #include "code/aotCodeCache.hpp" 28 #include "code/compiledIC.hpp" 29 #include "compiler/compiler_globals.hpp" 30 #include "compiler/disassembler.hpp" 31 #include "crc32c.h" 32 #include "gc/shared/barrierSet.hpp" 33 #include "gc/shared/barrierSetAssembler.hpp" 34 #include "gc/shared/collectedHeap.inline.hpp" 35 #include "gc/shared/tlab_globals.hpp" 36 #include "interpreter/bytecodeHistogram.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "interpreter/interpreterRuntime.hpp" 39 #include "jvm.h" 40 #include "memory/resourceArea.hpp" 41 #include "memory/universe.hpp" 42 #include "oops/accessDecorators.hpp" 43 #include "oops/compressedKlass.inline.hpp" 44 #include "oops/compressedOops.inline.hpp" 45 #include "oops/klass.inline.hpp" 46 #include "prims/methodHandles.hpp" 47 #include "runtime/continuation.hpp" 48 #include "runtime/interfaceSupport.inline.hpp" 49 #include "runtime/javaThread.hpp" 50 #include "runtime/jniHandles.hpp" 51 #include "runtime/objectMonitor.hpp" 52 #include "runtime/os.hpp" 53 #include "runtime/safepoint.hpp" 54 #include "runtime/safepointMechanism.hpp" 55 #include "runtime/sharedRuntime.hpp" 56 #include "runtime/stubRoutines.hpp" 57 #include "utilities/checkedCast.hpp" 58 #include "utilities/macros.hpp" 59 60 #ifdef PRODUCT 61 #define BLOCK_COMMENT(str) /* nothing */ 62 #define STOP(error) stop(error) 63 #else 64 #define BLOCK_COMMENT(str) block_comment(str) 65 #define STOP(error) block_comment(error); stop(error) 66 #endif 67 68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 69 70 #ifdef ASSERT 71 bool AbstractAssembler::pd_check_instruction_mark() { return true; } 72 #endif 73 74 static const Assembler::Condition reverse[] = { 75 Assembler::noOverflow /* overflow = 0x0 */ , 76 Assembler::overflow /* noOverflow = 0x1 */ , 77 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ , 78 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ , 79 Assembler::notZero /* zero = 0x4, equal = 0x4 */ , 80 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ , 81 Assembler::above /* belowEqual = 0x6 */ , 82 Assembler::belowEqual /* above = 0x7 */ , 83 Assembler::positive /* negative = 0x8 */ , 84 Assembler::negative /* positive = 0x9 */ , 85 Assembler::noParity /* parity = 0xa */ , 86 Assembler::parity /* noParity = 0xb */ , 87 Assembler::greaterEqual /* less = 0xc */ , 88 Assembler::less /* 
greaterEqual = 0xd */ , 89 Assembler::greater /* lessEqual = 0xe */ , 90 Assembler::lessEqual /* greater = 0xf, */ 91 92 }; 93 94 95 // Implementation of MacroAssembler 96 97 Address MacroAssembler::as_Address(AddressLiteral adr) { 98 // amd64 always does this as a pc-rel 99 // we can be absolute or disp based on the instruction type 100 // jmp/call are displacements others are absolute 101 assert(!adr.is_lval(), "must be rval"); 102 assert(reachable(adr), "must be"); 103 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 104 105 } 106 107 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 108 AddressLiteral base = adr.base(); 109 lea(rscratch, base); 110 Address index = adr.index(); 111 assert(index._disp == 0, "must not have disp"); // maybe it can? 112 Address array(rscratch, index._index, index._scale, index._disp); 113 return array; 114 } 115 116 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 117 Label L, E; 118 119 #ifdef _WIN64 120 // Windows always allocates space for its register args 121 assert(num_args <= 4, "only register arguments supported"); 122 subq(rsp, frame::arg_reg_save_area_bytes); 123 #endif 124 125 // Align stack if necessary 126 testl(rsp, 15); 127 jcc(Assembler::zero, L); 128 129 subq(rsp, 8); 130 call(RuntimeAddress(entry_point)); 131 addq(rsp, 8); 132 jmp(E); 133 134 bind(L); 135 call(RuntimeAddress(entry_point)); 136 137 bind(E); 138 139 #ifdef _WIN64 140 // restore stack pointer 141 addq(rsp, frame::arg_reg_save_area_bytes); 142 #endif 143 } 144 145 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 146 assert(!src2.is_lval(), "should use cmpptr"); 147 assert(rscratch != noreg || always_reachable(src2), "missing"); 148 149 if (reachable(src2)) { 150 cmpq(src1, as_Address(src2)); 151 } else { 152 lea(rscratch, src2); 153 Assembler::cmpq(src1, Address(rscratch, 0)); 154 } 155 } 156 157 int MacroAssembler::corrected_idivq(Register reg) { 158 // Full implementation of Java ldiv and lrem; checks for special 159 // case as described in JVM spec., p.243 & p.271. The function 160 // returns the (pc) offset of the idivq instruction - may be needed 161 // for implicit exceptions.
162 // 163 // normal case special case 164 // 165 // input : rax: dividend min_long 166 // reg: divisor (may not be eax/edx) -1 167 // 168 // output: rax: quotient (= rax idiv reg) min_long 169 // rdx: remainder (= rax irem reg) 0 170 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); 171 static const int64_t min_long = 0x8000000000000000; 172 Label normal_case, special_case; 173 174 // check for special case 175 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/); 176 jcc(Assembler::notEqual, normal_case); 177 xorl(rdx, rdx); // prepare rdx for possible special case (where 178 // remainder = 0) 179 cmpq(reg, -1); 180 jcc(Assembler::equal, special_case); 181 182 // handle normal case 183 bind(normal_case); 184 cdqq(); 185 int idivq_offset = offset(); 186 idivq(reg); 187 188 // normal and special case exit 189 bind(special_case); 190 191 return idivq_offset; 192 } 193 194 void MacroAssembler::decrementq(Register reg, int value) { 195 if (value == min_jint) { subq(reg, value); return; } 196 if (value < 0) { incrementq(reg, -value); return; } 197 if (value == 0) { ; return; } 198 if (value == 1 && UseIncDec) { decq(reg) ; return; } 199 /* else */ { subq(reg, value) ; return; } 200 } 201 202 void MacroAssembler::decrementq(Address dst, int value) { 203 if (value == min_jint) { subq(dst, value); return; } 204 if (value < 0) { incrementq(dst, -value); return; } 205 if (value == 0) { ; return; } 206 if (value == 1 && UseIncDec) { decq(dst) ; return; } 207 /* else */ { subq(dst, value) ; return; } 208 } 209 210 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) { 211 assert(rscratch != noreg || always_reachable(dst), "missing"); 212 213 if (reachable(dst)) { 214 incrementq(as_Address(dst)); 215 } else { 216 lea(rscratch, dst); 217 incrementq(Address(rscratch, 0)); 218 } 219 } 220 221 void MacroAssembler::incrementq(Register reg, int value) { 222 if (value == min_jint) { addq(reg, value); return; } 223 if (value < 0) { decrementq(reg, -value); return; } 224 if (value == 0) { ; return; } 225 if (value == 1 && UseIncDec) { incq(reg) ; return; } 226 /* else */ { addq(reg, value) ; return; } 227 } 228 229 void MacroAssembler::incrementq(Address dst, int value) { 230 if (value == min_jint) { addq(dst, value); return; } 231 if (value < 0) { decrementq(dst, -value); return; } 232 if (value == 0) { ; return; } 233 if (value == 1 && UseIncDec) { incq(dst) ; return; } 234 /* else */ { addq(dst, value) ; return; } 235 } 236 237 // 32bit can do a case table jump in one instruction but we no longer allow the base 238 // to be installed in the Address class 239 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 240 lea(rscratch, entry.base()); 241 Address dispatch = entry.index(); 242 assert(dispatch._base == noreg, "must be"); 243 dispatch._base = rscratch; 244 jmp(dispatch); 245 } 246 247 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 248 ShouldNotReachHere(); // 64bit doesn't use two regs 249 cmpq(x_lo, y_lo); 250 } 251 252 void MacroAssembler::lea(Register dst, AddressLiteral src) { 253 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 254 } 255 256 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 257 lea(rscratch, adr); 258 movptr(dst, rscratch); 259 } 260 261 void MacroAssembler::leave() { 262 // %%% is this really better? Why not on 32bit too? 
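  // Note: 0xC9 is the one-byte LEAVE instruction, equivalent to "mov rsp, rbp; pop rbp",
  // i.e. it tears down the frame that enter() builds with "push rbp; mov rbp, rsp".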
263 emit_int8((unsigned char)0xC9); // LEAVE 264 } 265 266 void MacroAssembler::lneg(Register hi, Register lo) { 267 ShouldNotReachHere(); // 64bit doesn't use two regs 268 negq(lo); 269 } 270 271 void MacroAssembler::movoop(Register dst, jobject obj) { 272 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 273 } 274 275 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 276 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 277 movq(dst, rscratch); 278 } 279 280 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 281 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 282 } 283 284 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 285 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 286 movq(dst, rscratch); 287 } 288 289 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 290 if (src.is_lval()) { 291 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 292 } else { 293 if (reachable(src)) { 294 movq(dst, as_Address(src)); 295 } else { 296 lea(dst, src); 297 movq(dst, Address(dst, 0)); 298 } 299 } 300 } 301 302 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 303 movq(as_Address(dst, rscratch), src); 304 } 305 306 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 307 movq(dst, as_Address(src, dst /*rscratch*/)); 308 } 309 310 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 311 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 312 if (is_simm32(src)) { 313 movptr(dst, checked_cast<int32_t>(src)); 314 } else { 315 mov64(rscratch, src); 316 movq(dst, rscratch); 317 } 318 } 319 320 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 321 movoop(rscratch, obj); 322 push(rscratch); 323 } 324 325 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 326 mov_metadata(rscratch, obj); 327 push(rscratch); 328 } 329 330 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 331 lea(rscratch, src); 332 if (src.is_lval()) { 333 push(rscratch); 334 } else { 335 pushq(Address(rscratch, 0)); 336 } 337 } 338 339 static void pass_arg0(MacroAssembler* masm, Register arg) { 340 if (c_rarg0 != arg ) { 341 masm->mov(c_rarg0, arg); 342 } 343 } 344 345 static void pass_arg1(MacroAssembler* masm, Register arg) { 346 if (c_rarg1 != arg ) { 347 masm->mov(c_rarg1, arg); 348 } 349 } 350 351 static void pass_arg2(MacroAssembler* masm, Register arg) { 352 if (c_rarg2 != arg ) { 353 masm->mov(c_rarg2, arg); 354 } 355 } 356 357 static void pass_arg3(MacroAssembler* masm, Register arg) { 358 if (c_rarg3 != arg ) { 359 masm->mov(c_rarg3, arg); 360 } 361 } 362 363 void MacroAssembler::stop(const char* msg) { 364 if (ShowMessageBoxOnError) { 365 address rip = pc(); 366 pusha(); // get regs on stack 367 lea(c_rarg1, InternalAddress(rip)); 368 movq(c_rarg2, rsp); // pass pointer to regs array 369 } 370 // Skip AOT caching C strings in scratch buffer. 371 const char* str = (code_section()->scratch_emit()) ? 
msg : AOTCodeCache::add_C_string(msg); 372 lea(c_rarg0, ExternalAddress((address) str)); 373 andq(rsp, -16); // align stack as required by ABI 374 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 375 hlt(); 376 } 377 378 void MacroAssembler::warn(const char* msg) { 379 push(rbp); 380 movq(rbp, rsp); 381 andq(rsp, -16); // align stack as required by push_CPU_state and call 382 push_CPU_state(); // keeps alignment at 16 bytes 383 384 #ifdef _WIN64 385 // Windows always allocates space for its register args 386 subq(rsp, frame::arg_reg_save_area_bytes); 387 #endif 388 lea(c_rarg0, ExternalAddress((address) msg)); 389 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); 390 391 #ifdef _WIN64 392 // restore stack pointer 393 addq(rsp, frame::arg_reg_save_area_bytes); 394 #endif 395 pop_CPU_state(); 396 mov(rsp, rbp); 397 pop(rbp); 398 } 399 400 void MacroAssembler::print_state() { 401 address rip = pc(); 402 pusha(); // get regs on stack 403 push(rbp); 404 movq(rbp, rsp); 405 andq(rsp, -16); // align stack as required by push_CPU_state and call 406 push_CPU_state(); // keeps alignment at 16 bytes 407 408 lea(c_rarg0, InternalAddress(rip)); 409 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array 410 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1); 411 412 pop_CPU_state(); 413 mov(rsp, rbp); 414 pop(rbp); 415 popa(); 416 } 417 418 #ifndef PRODUCT 419 extern "C" void findpc(intptr_t x); 420 #endif 421 422 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { 423 // In order to get locks to work, we need to fake an in_VM state 424 if (ShowMessageBoxOnError) { 425 JavaThread* thread = JavaThread::current(); 426 JavaThreadState saved_state = thread->thread_state(); 427 thread->set_thread_state(_thread_in_vm); 428 #ifndef PRODUCT 429 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 430 ttyLocker ttyl; 431 BytecodeCounter::print(); 432 } 433 #endif 434 // To see where a verify_oop failed, get $ebx+40/X for this frame. 435 // XXX correct this offset for amd64 436 // This is the value of eip which points to where verify_oop will return. 437 if (os::message_box(msg, "Execution stopped, print registers?")) { 438 print_state64(pc, regs); 439 BREAKPOINT; 440 } 441 } 442 fatal("DEBUG MESSAGE: %s", msg); 443 } 444 445 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) { 446 ttyLocker ttyl; 447 DebuggingContext debugging{}; 448 tty->print_cr("rip = 0x%016lx", (intptr_t)pc); 449 #ifndef PRODUCT 450 tty->cr(); 451 findpc(pc); 452 tty->cr(); 453 #endif 454 #define PRINT_REG(rax, value) \ 455 { tty->print("%s = ", #rax); os::print_location(tty, value); } 456 PRINT_REG(rax, regs[15]); 457 PRINT_REG(rbx, regs[12]); 458 PRINT_REG(rcx, regs[14]); 459 PRINT_REG(rdx, regs[13]); 460 PRINT_REG(rdi, regs[8]); 461 PRINT_REG(rsi, regs[9]); 462 PRINT_REG(rbp, regs[10]); 463 // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp 464 PRINT_REG(rsp, (intptr_t)(&regs[16])); 465 PRINT_REG(r8 , regs[7]); 466 PRINT_REG(r9 , regs[6]); 467 PRINT_REG(r10, regs[5]); 468 PRINT_REG(r11, regs[4]); 469 PRINT_REG(r12, regs[3]); 470 PRINT_REG(r13, regs[2]); 471 PRINT_REG(r14, regs[1]); 472 PRINT_REG(r15, regs[0]); 473 #undef PRINT_REG 474 // Print some words near the top of the stack.
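  // The first 8 words are decoded with os::print_location, followed by 25 rows of 4 raw words each.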
int64_t* rsp = &regs[16]; 476 int64_t* dump_sp = rsp; 477 for (int col1 = 0; col1 < 8; col1++) { 478 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 479 os::print_location(tty, *dump_sp++); 480 } 481 for (int row = 0; row < 25; row++) { 482 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 483 for (int col = 0; col < 4; col++) { 484 tty->print(" 0x%016lx", (intptr_t)*dump_sp++); 485 } 486 tty->cr(); 487 } 488 // Print some instructions around pc: 489 Disassembler::decode((address)pc-64, (address)pc); 490 tty->print_cr("--------"); 491 Disassembler::decode((address)pc, (address)pc+32); 492 } 493 494 // The java_calling_convention describes stack locations as ideal slots on 495 // a frame with no abi restrictions. Since we must observe abi restrictions 496 // (like the placement of the register window) the slots must be biased by 497 // the following value. 498 static int reg2offset_in(VMReg r) { 499 // Account for saved rbp and return address 500 // This should really be in_preserve_stack_slots 501 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 502 } 503 504 static int reg2offset_out(VMReg r) { 505 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 506 } 507 508 // A long move 509 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 510 511 // The calling conventions assures us that each VMregpair is either 512 // all really one physical register or adjacent stack slots. 513 514 if (src.is_single_phys_reg() ) { 515 if (dst.is_single_phys_reg()) { 516 if (dst.first() != src.first()) { 517 mov(dst.first()->as_Register(), src.first()->as_Register()); 518 } 519 } else { 520 assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)", 521 src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name()); 522 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register()); 523 } 524 } else if (dst.is_single_phys_reg()) { 525 assert(src.is_single_reg(), "not a stack pair"); 526 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 527 } else { 528 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs"); 529 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 530 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 531 } 532 } 533 534 // A double move 535 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 536 537 // The calling conventions assures us that each VMregpair is either 538 // all really one physical register or adjacent stack slots.
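  // Four cases below: xmm->xmm, xmm->stack, stack->xmm, and stack->stack (copied through tmp as a 64-bit load/store).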
539 540 if (src.is_single_phys_reg() ) { 541 if (dst.is_single_phys_reg()) { 542 // In theory these overlap but the ordering is such that this is likely a nop 543 if ( src.first() != dst.first()) { 544 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister()); 545 } 546 } else { 547 assert(dst.is_single_reg(), "not a stack pair"); 548 movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister()); 549 } 550 } else if (dst.is_single_phys_reg()) { 551 assert(src.is_single_reg(), "not a stack pair"); 552 movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 553 } else { 554 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs"); 555 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 556 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 557 } 558 } 559 560 561 // A float arg may have to do float reg int reg conversion 562 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 563 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); 564 565 // The calling conventions assures us that each VMregpair is either 566 // all really one physical register or adjacent stack slots. 567 568 if (src.first()->is_stack()) { 569 if (dst.first()->is_stack()) { 570 movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 571 movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 572 } else { 573 // stack to reg 574 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters"); 575 movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 576 } 577 } else if (dst.first()->is_stack()) { 578 // reg to stack 579 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters"); 580 movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister()); 581 } else { 582 // reg to reg 583 // In theory these overlap but the ordering is such that this is likely a nop 584 if ( src.first() != dst.first()) { 585 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister()); 586 } 587 } 588 } 589 590 // On 64 bit we will store integer like items to the stack as 591 // 64 bits items (x86_32/64 abi) even though java would only store 592 // 32bits for a parameter. On 32bit it will simply be 32 bits 593 // So this routine will do 32->32 on 32bit and 32->64 on 64bit 594 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 595 if (src.first()->is_stack()) { 596 if (dst.first()->is_stack()) { 597 // stack to stack 598 movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 599 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 600 } else { 601 // stack to reg 602 movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 603 } 604 } else if (dst.first()->is_stack()) { 605 // reg to stack 606 // Do we really have to sign extend??? 607 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 608 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register()); 609 } else { 610 // Do we really have to sign extend??? 
611 // __ movslq(dst.first()->as_Register(), src.first()->as_Register()); 612 if (dst.first() != src.first()) { 613 movq(dst.first()->as_Register(), src.first()->as_Register()); 614 } 615 } 616 } 617 618 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) { 619 if (src.first()->is_stack()) { 620 if (dst.first()->is_stack()) { 621 // stack to stack 622 movq(rax, Address(rbp, reg2offset_in(src.first()))); 623 movq(Address(rsp, reg2offset_out(dst.first())), rax); 624 } else { 625 // stack to reg 626 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); 627 } 628 } else if (dst.first()->is_stack()) { 629 // reg to stack 630 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); 631 } else { 632 if (dst.first() != src.first()) { 633 movq(dst.first()->as_Register(), src.first()->as_Register()); 634 } 635 } 636 } 637 638 // An oop arg. Must pass a handle not the oop itself 639 void MacroAssembler::object_move(OopMap* map, 640 int oop_handle_offset, 641 int framesize_in_slots, 642 VMRegPair src, 643 VMRegPair dst, 644 bool is_receiver, 645 int* receiver_offset) { 646 647 // must pass a handle. First figure out the location we use as a handle 648 649 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register(); 650 651 // See if oop is null if it is we need no handle 652 653 if (src.first()->is_stack()) { 654 655 // Oop is already on the stack as an argument 656 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 657 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 658 if (is_receiver) { 659 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 660 } 661 662 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD); 663 lea(rHandle, Address(rbp, reg2offset_in(src.first()))); 664 // conditionally move a null 665 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); 666 } else { 667 668 // Oop is in a register we must store it to the space we reserve 669 // on the stack for oop_handles and pass a handle if oop is non-null 670 671 const Register rOop = src.first()->as_Register(); 672 int oop_slot; 673 if (rOop == j_rarg0) 674 oop_slot = 0; 675 else if (rOop == j_rarg1) 676 oop_slot = 1; 677 else if (rOop == j_rarg2) 678 oop_slot = 2; 679 else if (rOop == j_rarg3) 680 oop_slot = 3; 681 else if (rOop == j_rarg4) 682 oop_slot = 4; 683 else { 684 assert(rOop == j_rarg5, "wrong register"); 685 oop_slot = 5; 686 } 687 688 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 689 int offset = oop_slot*VMRegImpl::stack_slot_size; 690 691 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 692 // Store oop in handle area, may be null 693 movptr(Address(rsp, offset), rOop); 694 if (is_receiver) { 695 *receiver_offset = offset; 696 } 697 698 cmpptr(rOop, NULL_WORD); 699 lea(rHandle, Address(rsp, offset)); 700 // conditionally move a null from the handle area where it was just stored 701 cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); 702 } 703 704 // If arg is on the stack then place it otherwise it is already in correct reg. 
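  // At this point rHandle is either the address of the stored oop (i.e. a handle) or null.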
705 if (dst.first()->is_stack()) { 706 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); 707 } 708 } 709 710 void MacroAssembler::addptr(Register dst, int32_t imm32) { 711 addq(dst, imm32); 712 } 713 714 void MacroAssembler::addptr(Register dst, Register src) { 715 addq(dst, src); 716 } 717 718 void MacroAssembler::addptr(Address dst, Register src) { 719 addq(dst, src); 720 } 721 722 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 723 assert(rscratch != noreg || always_reachable(src), "missing"); 724 725 if (reachable(src)) { 726 Assembler::addsd(dst, as_Address(src)); 727 } else { 728 lea(rscratch, src); 729 Assembler::addsd(dst, Address(rscratch, 0)); 730 } 731 } 732 733 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) { 734 assert(rscratch != noreg || always_reachable(src), "missing"); 735 736 if (reachable(src)) { 737 addss(dst, as_Address(src)); 738 } else { 739 lea(rscratch, src); 740 addss(dst, Address(rscratch, 0)); 741 } 742 } 743 744 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 745 assert(rscratch != noreg || always_reachable(src), "missing"); 746 747 if (reachable(src)) { 748 Assembler::addpd(dst, as_Address(src)); 749 } else { 750 lea(rscratch, src); 751 Assembler::addpd(dst, Address(rscratch, 0)); 752 } 753 } 754 755 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only. 756 // Stub code is generated once and never copied. 757 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes. 758 void MacroAssembler::align64() { 759 align(64, (uint)(uintptr_t)pc()); 760 } 761 762 void MacroAssembler::align32() { 763 align(32, (uint)(uintptr_t)pc()); 764 } 765 766 void MacroAssembler::align(uint modulus) { 767 // 8273459: Ensure alignment is possible with current segment alignment 768 assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment"); 769 align(modulus, offset()); 770 } 771 772 void MacroAssembler::align(uint modulus, uint target) { 773 if (target % modulus != 0) { 774 nop(modulus - (target % modulus)); 775 } 776 } 777 778 void MacroAssembler::push_f(XMMRegister r) { 779 subptr(rsp, wordSize); 780 movflt(Address(rsp, 0), r); 781 } 782 783 void MacroAssembler::pop_f(XMMRegister r) { 784 movflt(r, Address(rsp, 0)); 785 addptr(rsp, wordSize); 786 } 787 788 void MacroAssembler::push_d(XMMRegister r) { 789 subptr(rsp, 2 * wordSize); 790 movdbl(Address(rsp, 0), r); 791 } 792 793 void MacroAssembler::pop_d(XMMRegister r) { 794 movdbl(r, Address(rsp, 0)); 795 addptr(rsp, 2 * Interpreter::stackElementSize); 796 } 797 798 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 799 // Used in sign-masking with aligned address. 800 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 801 assert(rscratch != noreg || always_reachable(src), "missing"); 802 803 if (UseAVX > 2 && 804 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 805 (dst->encoding() >= 16)) { 806 vpand(dst, dst, src, AVX_512bit, rscratch); 807 } else if (reachable(src)) { 808 Assembler::andpd(dst, as_Address(src)); 809 } else { 810 lea(rscratch, src); 811 Assembler::andpd(dst, Address(rscratch, 0)); 812 } 813 } 814 815 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) { 816 // Used in sign-masking with aligned address. 
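  // The legacy (non-AVX) andps encoding requires a 16-byte aligned memory operand, hence the assert below.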
817 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 818 assert(rscratch != noreg || always_reachable(src), "missing"); 819 820 if (reachable(src)) { 821 Assembler::andps(dst, as_Address(src)); 822 } else { 823 lea(rscratch, src); 824 Assembler::andps(dst, Address(rscratch, 0)); 825 } 826 } 827 828 void MacroAssembler::andptr(Register dst, int32_t imm32) { 829 andq(dst, imm32); 830 } 831 832 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) { 833 assert(rscratch != noreg || always_reachable(src), "missing"); 834 835 if (reachable(src)) { 836 andq(dst, as_Address(src)); 837 } else { 838 lea(rscratch, src); 839 andq(dst, Address(rscratch, 0)); 840 } 841 } 842 843 void MacroAssembler::atomic_incl(Address counter_addr) { 844 lock(); 845 incrementl(counter_addr); 846 } 847 848 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) { 849 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 850 851 if (reachable(counter_addr)) { 852 atomic_incl(as_Address(counter_addr)); 853 } else { 854 lea(rscratch, counter_addr); 855 atomic_incl(Address(rscratch, 0)); 856 } 857 } 858 859 void MacroAssembler::atomic_incq(Address counter_addr) { 860 lock(); 861 incrementq(counter_addr); 862 } 863 864 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) { 865 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 866 867 if (reachable(counter_addr)) { 868 atomic_incq(as_Address(counter_addr)); 869 } else { 870 lea(rscratch, counter_addr); 871 atomic_incq(Address(rscratch, 0)); 872 } 873 } 874 875 // Writes to stack successive pages until offset reached to check for 876 // stack overflow + shadow pages. This clobbers tmp. 877 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 878 movptr(tmp, rsp); 879 // Bang stack for total size given plus shadow page size. 880 // Bang one page at a time because large size can bang beyond yellow and 881 // red zones. 882 Label loop; 883 bind(loop); 884 movl(Address(tmp, (-(int)os::vm_page_size())), size ); 885 subptr(tmp, (int)os::vm_page_size()); 886 subl(size, (int)os::vm_page_size()); 887 jcc(Assembler::greater, loop); 888 889 // Bang down shadow pages too. 890 // At this point, (tmp-0) is the last address touched, so don't 891 // touch it again. (It was touched as (tmp-pagesize) but then tmp 892 // was post-decremented.) Skip this address by starting at i=1, and 893 // touch a few more pages below. N.B. It is important to touch all 894 // the way down including all pages in the shadow zone. 895 for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) { 896 // this could be any sized move but this is can be a debugging crumb 897 // so the bigger the better. 
898 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size ); 899 } 900 } 901 902 void MacroAssembler::reserved_stack_check() { 903 // testing if reserved zone needs to be enabled 904 Label no_reserved_zone_enabling; 905 906 cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset())); 907 jcc(Assembler::below, no_reserved_zone_enabling); 908 909 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread); 910 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); 911 should_not_reach_here(); 912 913 bind(no_reserved_zone_enabling); 914 } 915 916 void MacroAssembler::c2bool(Register x) { 917 // implements x == 0 ? 0 : 1 918 // note: must only look at least-significant byte of x 919 // since C-style booleans are stored in one byte 920 // only! (was bug) 921 andl(x, 0xFF); 922 setb(Assembler::notZero, x); 923 } 924 925 // Wouldn't need if AddressLiteral version had new name 926 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 927 Assembler::call(L, rtype); 928 } 929 930 void MacroAssembler::call(Register entry) { 931 Assembler::call(entry); 932 } 933 934 void MacroAssembler::call(AddressLiteral entry, Register rscratch) { 935 assert(rscratch != noreg || always_reachable(entry), "missing"); 936 937 if (reachable(entry)) { 938 Assembler::call_literal(entry.target(), entry.rspec()); 939 } else { 940 lea(rscratch, entry); 941 Assembler::call(rscratch); 942 } 943 } 944 945 void MacroAssembler::ic_call(address entry, jint method_index) { 946 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 947 // Needs full 64-bit immediate for later patching. 948 mov64(rax, (int64_t)Universe::non_oop_word()); 949 call(AddressLiteral(entry, rh)); 950 } 951 952 int MacroAssembler::ic_check_size() { 953 return UseCompactObjectHeaders ? 17 : 14; 954 } 955 956 int MacroAssembler::ic_check(int end_alignment) { 957 Register receiver = j_rarg0; 958 Register data = rax; 959 Register temp = rscratch1; 960 961 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed 962 // before the inline cache check, so we don't have to execute any nop instructions when dispatching 963 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align 964 // before the inline cache check here, and not after 965 align(end_alignment, offset() + ic_check_size()); 966 967 int uep_offset = offset(); 968 969 if (UseCompactObjectHeaders) { 970 load_narrow_klass_compact(temp, receiver); 971 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); 972 } else if (UseCompressedClassPointers) { 973 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 974 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); 975 } else { 976 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 977 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset())); 978 } 979 980 // if inline cache check fails, then jump to runtime routine 981 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 982 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment); 983 984 return uep_offset; 985 } 986 987 void MacroAssembler::emit_static_call_stub() { 988 // Static stub relocation also tags the Method* in the code-stream. 989 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time. 
990 // This is recognized as unresolved by relocs/nativeinst/ic code. 991 jump(RuntimeAddress(pc())); 992 } 993 994 // Implementation of call_VM versions 995 996 void MacroAssembler::call_VM(Register oop_result, 997 address entry_point, 998 bool check_exceptions) { 999 Label C, E; 1000 call(C, relocInfo::none); 1001 jmp(E); 1002 1003 bind(C); 1004 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 1005 ret(0); 1006 1007 bind(E); 1008 } 1009 1010 void MacroAssembler::call_VM(Register oop_result, 1011 address entry_point, 1012 Register arg_1, 1013 bool check_exceptions) { 1014 Label C, E; 1015 call(C, relocInfo::none); 1016 jmp(E); 1017 1018 bind(C); 1019 pass_arg1(this, arg_1); 1020 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 1021 ret(0); 1022 1023 bind(E); 1024 } 1025 1026 void MacroAssembler::call_VM(Register oop_result, 1027 address entry_point, 1028 Register arg_1, 1029 Register arg_2, 1030 bool check_exceptions) { 1031 Label C, E; 1032 call(C, relocInfo::none); 1033 jmp(E); 1034 1035 bind(C); 1036 1037 assert_different_registers(arg_1, c_rarg2); 1038 1039 pass_arg2(this, arg_2); 1040 pass_arg1(this, arg_1); 1041 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 1042 ret(0); 1043 1044 bind(E); 1045 } 1046 1047 void MacroAssembler::call_VM(Register oop_result, 1048 address entry_point, 1049 Register arg_1, 1050 Register arg_2, 1051 Register arg_3, 1052 bool check_exceptions) { 1053 Label C, E; 1054 call(C, relocInfo::none); 1055 jmp(E); 1056 1057 bind(C); 1058 1059 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1060 assert_different_registers(arg_2, c_rarg3); 1061 pass_arg3(this, arg_3); 1062 pass_arg2(this, arg_2); 1063 pass_arg1(this, arg_1); 1064 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 1065 ret(0); 1066 1067 bind(E); 1068 } 1069 1070 void MacroAssembler::call_VM(Register oop_result, 1071 Register last_java_sp, 1072 address entry_point, 1073 int number_of_arguments, 1074 bool check_exceptions) { 1075 call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1076 } 1077 1078 void MacroAssembler::call_VM(Register oop_result, 1079 Register last_java_sp, 1080 address entry_point, 1081 Register arg_1, 1082 bool check_exceptions) { 1083 pass_arg1(this, arg_1); 1084 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1085 } 1086 1087 void MacroAssembler::call_VM(Register oop_result, 1088 Register last_java_sp, 1089 address entry_point, 1090 Register arg_1, 1091 Register arg_2, 1092 bool check_exceptions) { 1093 1094 assert_different_registers(arg_1, c_rarg2); 1095 pass_arg2(this, arg_2); 1096 pass_arg1(this, arg_1); 1097 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1098 } 1099 1100 void MacroAssembler::call_VM(Register oop_result, 1101 Register last_java_sp, 1102 address entry_point, 1103 Register arg_1, 1104 Register arg_2, 1105 Register arg_3, 1106 bool check_exceptions) { 1107 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1108 assert_different_registers(arg_2, c_rarg3); 1109 pass_arg3(this, arg_3); 1110 pass_arg2(this, arg_2); 1111 pass_arg1(this, arg_1); 1112 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1113 } 1114 1115 void MacroAssembler::super_call_VM(Register oop_result, 1116 Register last_java_sp, 1117 address entry_point, 1118 int number_of_arguments, 1119 bool check_exceptions) { 1120 MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1121 } 1122 1123 
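// The super_call_VM overloads below only shuffle their Register arguments into the C argument
// registers (pass_arg1..pass_arg3 -> c_rarg1..c_rarg3) before delegating to call_VM_base.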
void MacroAssembler::super_call_VM(Register oop_result, 1124 Register last_java_sp, 1125 address entry_point, 1126 Register arg_1, 1127 bool check_exceptions) { 1128 pass_arg1(this, arg_1); 1129 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1130 } 1131 1132 void MacroAssembler::super_call_VM(Register oop_result, 1133 Register last_java_sp, 1134 address entry_point, 1135 Register arg_1, 1136 Register arg_2, 1137 bool check_exceptions) { 1138 1139 assert_different_registers(arg_1, c_rarg2); 1140 pass_arg2(this, arg_2); 1141 pass_arg1(this, arg_1); 1142 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1143 } 1144 1145 void MacroAssembler::super_call_VM(Register oop_result, 1146 Register last_java_sp, 1147 address entry_point, 1148 Register arg_1, 1149 Register arg_2, 1150 Register arg_3, 1151 bool check_exceptions) { 1152 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1153 assert_different_registers(arg_2, c_rarg3); 1154 pass_arg3(this, arg_3); 1155 pass_arg2(this, arg_2); 1156 pass_arg1(this, arg_1); 1157 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1158 } 1159 1160 void MacroAssembler::call_VM_base(Register oop_result, 1161 Register last_java_sp, 1162 address entry_point, 1163 int number_of_arguments, 1164 bool check_exceptions) { 1165 Register java_thread = r15_thread; 1166 1167 // determine last_java_sp register 1168 if (!last_java_sp->is_valid()) { 1169 last_java_sp = rsp; 1170 } 1171 // debugging support 1172 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 1173 #ifdef ASSERT 1174 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 1175 // r12 is the heapbase. 1176 if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?"); 1177 #endif // ASSERT 1178 1179 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 1180 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 1181 1182 // push java thread (becomes first argument of C function) 1183 1184 mov(c_rarg0, r15_thread); 1185 1186 // set last Java frame before call 1187 assert(last_java_sp != rbp, "can't use ebp/rbp"); 1188 1189 // Only interpreter should have to set fp 1190 set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1); 1191 1192 // do the call, remove parameters 1193 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 1194 1195 #ifdef ASSERT 1196 // Check that thread register is not clobbered. 1197 guarantee(java_thread != rax, "change this code"); 1198 push(rax); 1199 { Label L; 1200 get_thread_slow(rax); 1201 cmpptr(java_thread, rax); 1202 jcc(Assembler::equal, L); 1203 STOP("MacroAssembler::call_VM_base: java_thread not callee saved?"); 1204 bind(L); 1205 } 1206 pop(rax); 1207 #endif 1208 1209 // reset last Java frame 1210 // Only interpreter should have to clear fp 1211 reset_last_Java_frame(true); 1212 1213 // C++ interp handles this in the interpreter 1214 check_and_handle_popframe(); 1215 check_and_handle_earlyret(); 1216 1217 if (check_exceptions) { 1218 // check for pending exceptions (java_thread is set upon return) 1219 cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD); 1220 // This used to conditionally jump to forward_exception however it is 1221 // possible if we relocate that the branch will not reach. 
So we must jump 1222 // around so we can always reach 1223 1224 Label ok; 1225 jcc(Assembler::equal, ok); 1226 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1227 bind(ok); 1228 } 1229 1230 // get oop result if there is one and reset the value in the thread 1231 if (oop_result->is_valid()) { 1232 get_vm_result_oop(oop_result); 1233 } 1234 } 1235 1236 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { 1237 // Calculate the value for last_Java_sp somewhat subtle. 1238 // call_VM does an intermediate call which places a return address on 1239 // the stack just under the stack pointer as the user finished with it. 1240 // This allows use to retrieve last_Java_pc from last_Java_sp[-1]. 1241 1242 // We've pushed one address, correct last_Java_sp 1243 lea(rax, Address(rsp, wordSize)); 1244 1245 call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions); 1246 } 1247 1248 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter. 1249 void MacroAssembler::call_VM_leaf0(address entry_point) { 1250 MacroAssembler::call_VM_leaf_base(entry_point, 0); 1251 } 1252 1253 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1254 call_VM_leaf_base(entry_point, number_of_arguments); 1255 } 1256 1257 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1258 pass_arg0(this, arg_0); 1259 call_VM_leaf(entry_point, 1); 1260 } 1261 1262 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1263 1264 assert_different_registers(arg_0, c_rarg1); 1265 pass_arg1(this, arg_1); 1266 pass_arg0(this, arg_0); 1267 call_VM_leaf(entry_point, 2); 1268 } 1269 1270 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1271 assert_different_registers(arg_0, c_rarg1, c_rarg2); 1272 assert_different_registers(arg_1, c_rarg2); 1273 pass_arg2(this, arg_2); 1274 pass_arg1(this, arg_1); 1275 pass_arg0(this, arg_0); 1276 call_VM_leaf(entry_point, 3); 1277 } 1278 1279 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1280 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 1281 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1282 assert_different_registers(arg_2, c_rarg3); 1283 pass_arg3(this, arg_3); 1284 pass_arg2(this, arg_2); 1285 pass_arg1(this, arg_1); 1286 pass_arg0(this, arg_0); 1287 call_VM_leaf(entry_point, 3); 1288 } 1289 1290 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1291 pass_arg0(this, arg_0); 1292 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1293 } 1294 1295 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1296 assert_different_registers(arg_0, c_rarg1); 1297 pass_arg1(this, arg_1); 1298 pass_arg0(this, arg_0); 1299 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1300 } 1301 1302 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1303 assert_different_registers(arg_0, c_rarg1, c_rarg2); 1304 assert_different_registers(arg_1, c_rarg2); 1305 pass_arg2(this, arg_2); 1306 pass_arg1(this, arg_1); 1307 pass_arg0(this, arg_0); 1308 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1309 } 1310 1311 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, 
Register arg_3) { 1312 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 1313 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1314 assert_different_registers(arg_2, c_rarg3); 1315 pass_arg3(this, arg_3); 1316 pass_arg2(this, arg_2); 1317 pass_arg1(this, arg_1); 1318 pass_arg0(this, arg_0); 1319 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1320 } 1321 1322 void MacroAssembler::get_vm_result_oop(Register oop_result) { 1323 movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset())); 1324 movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD); 1325 verify_oop_msg(oop_result, "broken oop in call_VM_base"); 1326 } 1327 1328 void MacroAssembler::get_vm_result_metadata(Register metadata_result) { 1329 movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset())); 1330 movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD); 1331 } 1332 1333 void MacroAssembler::check_and_handle_earlyret() { 1334 } 1335 1336 void MacroAssembler::check_and_handle_popframe() { 1337 } 1338 1339 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1340 assert(rscratch != noreg || always_reachable(src1), "missing"); 1341 1342 if (reachable(src1)) { 1343 cmpl(as_Address(src1), imm); 1344 } else { 1345 lea(rscratch, src1); 1346 cmpl(Address(rscratch, 0), imm); 1347 } 1348 } 1349 1350 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1351 assert(!src2.is_lval(), "use cmpptr"); 1352 assert(rscratch != noreg || always_reachable(src2), "missing"); 1353 1354 if (reachable(src2)) { 1355 cmpl(src1, as_Address(src2)); 1356 } else { 1357 lea(rscratch, src2); 1358 cmpl(src1, Address(rscratch, 0)); 1359 } 1360 } 1361 1362 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1363 Assembler::cmpl(src1, imm); 1364 } 1365 1366 void MacroAssembler::cmp32(Register src1, Address src2) { 1367 Assembler::cmpl(src1, src2); 1368 } 1369 1370 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1371 ucomisd(opr1, opr2); 1372 1373 Label L; 1374 if (unordered_is_less) { 1375 movl(dst, -1); 1376 jcc(Assembler::parity, L); 1377 jcc(Assembler::below , L); 1378 movl(dst, 0); 1379 jcc(Assembler::equal , L); 1380 increment(dst); 1381 } else { // unordered is greater 1382 movl(dst, 1); 1383 jcc(Assembler::parity, L); 1384 jcc(Assembler::above , L); 1385 movl(dst, 0); 1386 jcc(Assembler::equal , L); 1387 decrementl(dst); 1388 } 1389 bind(L); 1390 } 1391 1392 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1393 ucomiss(opr1, opr2); 1394 1395 Label L; 1396 if (unordered_is_less) { 1397 movl(dst, -1); 1398 jcc(Assembler::parity, L); 1399 jcc(Assembler::below , L); 1400 movl(dst, 0); 1401 jcc(Assembler::equal , L); 1402 increment(dst); 1403 } else { // unordered is greater 1404 movl(dst, 1); 1405 jcc(Assembler::parity, L); 1406 jcc(Assembler::above , L); 1407 movl(dst, 0); 1408 jcc(Assembler::equal , L); 1409 decrementl(dst); 1410 } 1411 bind(L); 1412 } 1413 1414 1415 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1416 assert(rscratch != noreg || always_reachable(src1), "missing"); 1417 1418 if (reachable(src1)) { 1419 cmpb(as_Address(src1), imm); 1420 } else { 1421 lea(rscratch, src1); 1422 cmpb(Address(rscratch, 0), imm); 1423 } 1424 } 1425 1426 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1427 
assert(rscratch != noreg || always_reachable(src2), "missing"); 1428 1429 if (src2.is_lval()) { 1430 movptr(rscratch, src2); 1431 Assembler::cmpq(src1, rscratch); 1432 } else if (reachable(src2)) { 1433 cmpq(src1, as_Address(src2)); 1434 } else { 1435 lea(rscratch, src2); 1436 Assembler::cmpq(src1, Address(rscratch, 0)); 1437 } 1438 } 1439 1440 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1441 assert(src2.is_lval(), "not a mem-mem compare"); 1442 // moves src2's literal address 1443 movptr(rscratch, src2); 1444 Assembler::cmpq(src1, rscratch); 1445 } 1446 1447 void MacroAssembler::cmpoop(Register src1, Register src2) { 1448 cmpptr(src1, src2); 1449 } 1450 1451 void MacroAssembler::cmpoop(Register src1, Address src2) { 1452 cmpptr(src1, src2); 1453 } 1454 1455 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1456 movoop(rscratch, src2); 1457 cmpptr(src1, rscratch); 1458 } 1459 1460 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1461 assert(rscratch != noreg || always_reachable(adr), "missing"); 1462 1463 if (reachable(adr)) { 1464 lock(); 1465 cmpxchgptr(reg, as_Address(adr)); 1466 } else { 1467 lea(rscratch, adr); 1468 lock(); 1469 cmpxchgptr(reg, Address(rscratch, 0)); 1470 } 1471 } 1472 1473 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1474 cmpxchgq(reg, adr); 1475 } 1476 1477 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1478 assert(rscratch != noreg || always_reachable(src), "missing"); 1479 1480 if (reachable(src)) { 1481 Assembler::comisd(dst, as_Address(src)); 1482 } else { 1483 lea(rscratch, src); 1484 Assembler::comisd(dst, Address(rscratch, 0)); 1485 } 1486 } 1487 1488 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1489 assert(rscratch != noreg || always_reachable(src), "missing"); 1490 1491 if (reachable(src)) { 1492 Assembler::comiss(dst, as_Address(src)); 1493 } else { 1494 lea(rscratch, src); 1495 Assembler::comiss(dst, Address(rscratch, 0)); 1496 } 1497 } 1498 1499 1500 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1501 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1502 1503 Condition negated_cond = negate_condition(cond); 1504 Label L; 1505 jcc(negated_cond, L); 1506 pushf(); // Preserve flags 1507 atomic_incl(counter_addr, rscratch); 1508 popf(); 1509 bind(L); 1510 } 1511 1512 int MacroAssembler::corrected_idivl(Register reg) { 1513 // Full implementation of Java idiv and irem; checks for 1514 // special case as described in JVM spec., p.243 & p.271. 1515 // The function returns the (pc) offset of the idivl 1516 // instruction - may be needed for implicit exceptions. 
1517 // 1518 // normal case special case 1519 // 1520 // input : rax,: dividend min_int 1521 // reg: divisor (may not be rax,/rdx) -1 1522 // 1523 // output: rax,: quotient (= rax, idiv reg) min_int 1524 // rdx: remainder (= rax, irem reg) 0 1525 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1526 const int min_int = 0x80000000; 1527 Label normal_case, special_case; 1528 1529 // check for special case 1530 cmpl(rax, min_int); 1531 jcc(Assembler::notEqual, normal_case); 1532 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1533 cmpl(reg, -1); 1534 jcc(Assembler::equal, special_case); 1535 1536 // handle normal case 1537 bind(normal_case); 1538 cdql(); 1539 int idivl_offset = offset(); 1540 idivl(reg); 1541 1542 // normal and special case exit 1543 bind(special_case); 1544 1545 return idivl_offset; 1546 } 1547 1548 1549 1550 void MacroAssembler::decrementl(Register reg, int value) { 1551 if (value == min_jint) {subl(reg, value) ; return; } 1552 if (value < 0) { incrementl(reg, -value); return; } 1553 if (value == 0) { ; return; } 1554 if (value == 1 && UseIncDec) { decl(reg) ; return; } 1555 /* else */ { subl(reg, value) ; return; } 1556 } 1557 1558 void MacroAssembler::decrementl(Address dst, int value) { 1559 if (value == min_jint) {subl(dst, value) ; return; } 1560 if (value < 0) { incrementl(dst, -value); return; } 1561 if (value == 0) { ; return; } 1562 if (value == 1 && UseIncDec) { decl(dst) ; return; } 1563 /* else */ { subl(dst, value) ; return; } 1564 } 1565 1566 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 1567 assert(shift_value > 0, "illegal shift value"); 1568 Label _is_positive; 1569 testl (reg, reg); 1570 jcc (Assembler::positive, _is_positive); 1571 int offset = (1 << shift_value) - 1 ; 1572 1573 if (offset == 1) { 1574 incrementl(reg); 1575 } else { 1576 addl(reg, offset); 1577 } 1578 1579 bind (_is_positive); 1580 sarl(reg, shift_value); 1581 } 1582 1583 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1584 assert(rscratch != noreg || always_reachable(src), "missing"); 1585 1586 if (reachable(src)) { 1587 Assembler::divsd(dst, as_Address(src)); 1588 } else { 1589 lea(rscratch, src); 1590 Assembler::divsd(dst, Address(rscratch, 0)); 1591 } 1592 } 1593 1594 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1595 assert(rscratch != noreg || always_reachable(src), "missing"); 1596 1597 if (reachable(src)) { 1598 Assembler::divss(dst, as_Address(src)); 1599 } else { 1600 lea(rscratch, src); 1601 Assembler::divss(dst, Address(rscratch, 0)); 1602 } 1603 } 1604 1605 void MacroAssembler::enter() { 1606 push(rbp); 1607 mov(rbp, rsp); 1608 } 1609 1610 void MacroAssembler::post_call_nop() { 1611 if (!Continuations::enabled()) { 1612 return; 1613 } 1614 InstructionMark im(this); 1615 relocate(post_call_nop_Relocation::spec()); 1616 InlineSkippedInstructionsCounter skipCounter(this); 1617 emit_int8((uint8_t)0x0f); 1618 emit_int8((uint8_t)0x1f); 1619 emit_int8((uint8_t)0x84); 1620 emit_int8((uint8_t)0x00); 1621 emit_int32(0x00); 1622 } 1623 1624 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1625 assert(rscratch != noreg || always_reachable(src), "missing"); 1626 if (reachable(src)) { 1627 Assembler::mulpd(dst, as_Address(src)); 1628 } else { 1629 lea(rscratch, src); 1630 Assembler::mulpd(dst, Address(rscratch, 0)); 1631 } 1632 } 1633 1634 // dst = c = a * b + c 1635 void 
MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 1636 Assembler::vfmadd231sd(c, a, b); 1637 if (dst != c) { 1638 movdbl(dst, c); 1639 } 1640 } 1641 1642 // dst = c = a * b + c 1643 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 1644 Assembler::vfmadd231ss(c, a, b); 1645 if (dst != c) { 1646 movflt(dst, c); 1647 } 1648 } 1649 1650 // dst = c = a * b + c 1651 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 1652 Assembler::vfmadd231pd(c, a, b, vector_len); 1653 if (dst != c) { 1654 vmovdqu(dst, c); 1655 } 1656 } 1657 1658 // dst = c = a * b + c 1659 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 1660 Assembler::vfmadd231ps(c, a, b, vector_len); 1661 if (dst != c) { 1662 vmovdqu(dst, c); 1663 } 1664 } 1665 1666 // dst = c = a * b + c 1667 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 1668 Assembler::vfmadd231pd(c, a, b, vector_len); 1669 if (dst != c) { 1670 vmovdqu(dst, c); 1671 } 1672 } 1673 1674 // dst = c = a * b + c 1675 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 1676 Assembler::vfmadd231ps(c, a, b, vector_len); 1677 if (dst != c) { 1678 vmovdqu(dst, c); 1679 } 1680 } 1681 1682 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 1683 assert(rscratch != noreg || always_reachable(dst), "missing"); 1684 1685 if (reachable(dst)) { 1686 incrementl(as_Address(dst)); 1687 } else { 1688 lea(rscratch, dst); 1689 incrementl(Address(rscratch, 0)); 1690 } 1691 } 1692 1693 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 1694 incrementl(as_Address(dst, rscratch)); 1695 } 1696 1697 void MacroAssembler::incrementl(Register reg, int value) { 1698 if (value == min_jint) {addl(reg, value) ; return; } 1699 if (value < 0) { decrementl(reg, -value); return; } 1700 if (value == 0) { ; return; } 1701 if (value == 1 && UseIncDec) { incl(reg) ; return; } 1702 /* else */ { addl(reg, value) ; return; } 1703 } 1704 1705 void MacroAssembler::incrementl(Address dst, int value) { 1706 if (value == min_jint) {addl(dst, value) ; return; } 1707 if (value < 0) { decrementl(dst, -value); return; } 1708 if (value == 0) { ; return; } 1709 if (value == 1 && UseIncDec) { incl(dst) ; return; } 1710 /* else */ { addl(dst, value) ; return; } 1711 } 1712 1713 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 1714 assert(rscratch != noreg || always_reachable(dst), "missing"); 1715 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 1716 if (reachable(dst)) { 1717 jmp_literal(dst.target(), dst.rspec()); 1718 } else { 1719 lea(rscratch, dst); 1720 jmp(rscratch); 1721 } 1722 } 1723 1724 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 1725 assert(rscratch != noreg || always_reachable(dst), "missing"); 1726 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 1727 if (reachable(dst)) { 1728 InstructionMark im(this); 1729 relocate(dst.reloc()); 1730 const int short_size = 2; 1731 const int long_size = 6; 1732 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 1733 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 1734 // 0111 tttn #8-bit disp 1735 emit_int8(0x70 | cc); 1736 emit_int8((offs - short_size) & 0xFF); 1737 } else { 1738 // 0000 1111 1000 
tttn #32-bit disp 1739 emit_int8(0x0F); 1740 emit_int8((unsigned char)(0x80 | cc)); 1741 emit_int32(offs - long_size); 1742 } 1743 } else { 1744 #ifdef ASSERT 1745 warning("reversing conditional branch"); 1746 #endif /* ASSERT */ 1747 Label skip; 1748 jccb(reverse[cc], skip); 1749 lea(rscratch, dst); 1750 Assembler::jmp(rscratch); 1751 bind(skip); 1752 } 1753 } 1754 1755 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) { 1756 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); 1757 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing"); 1758 1759 stmxcsr(mxcsr_save); 1760 movl(tmp, mxcsr_save); 1761 if (EnableX86ECoreOpts) { 1762 // The mxcsr_std has status bits set for performance on ECore 1763 orl(tmp, 0x003f); 1764 } else { 1765 // Mask out status bits (only check control and mask bits) 1766 andl(tmp, 0xFFC0); 1767 } 1768 cmp32(tmp, mxcsr_std, rscratch); 1769 } 1770 1771 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 1772 assert(rscratch != noreg || always_reachable(src), "missing"); 1773 1774 if (reachable(src)) { 1775 Assembler::ldmxcsr(as_Address(src)); 1776 } else { 1777 lea(rscratch, src); 1778 Assembler::ldmxcsr(Address(rscratch, 0)); 1779 } 1780 } 1781 1782 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1783 int off = offset(); 1784 movsbl(dst, src); // movsxb 1785 return off; 1786 } 1787 1788 // Note: load_signed_short used to be called load_signed_word. 1789 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 1790 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 1791 // The term "word" in HotSpot means a 32- or 64-bit machine word. 1792 int MacroAssembler::load_signed_short(Register dst, Address src) { 1793 // It would seem safe to sign-extend 16 => 64 bits directly, but the 1794 // 64-bit code has always extended only to 32 bits, which implies that 1795 // callers use at most 32 bits of the result. 1796 int off = offset(); 1797 movswl(dst, src); // movsxw 1798 return off; 1799 } 1800 1801 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1802 // Per Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1803 // and "3.9 Partial Register Penalties", p. 22, movzbl avoids partial-register stalls. 1804 int off = offset(); 1805 movzbl(dst, src); // movzxb 1806 return off; 1807 } 1808 1809 // Note: load_unsigned_short used to be called load_unsigned_word. 1810 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1811 // Per Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1812 // and "3.9 Partial Register Penalties", p. 22, movzwl avoids partial-register stalls. 1813 int off = offset(); 1814 movzwl(dst, src); // movzxw 1815 return off; 1816 } 1817 1818 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1819 switch (size_in_bytes) { 1820 case 8: movq(dst, src); break; 1821 case 4: movl(dst, src); break; 1822 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1823 case 1: is_signed ?
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1824 default: ShouldNotReachHere(); 1825 } 1826 } 1827 1828 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1829 switch (size_in_bytes) { 1830 case 8: movq(dst, src); break; 1831 case 4: movl(dst, src); break; 1832 case 2: movw(dst, src); break; 1833 case 1: movb(dst, src); break; 1834 default: ShouldNotReachHere(); 1835 } 1836 } 1837 1838 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 1839 assert(rscratch != noreg || always_reachable(dst), "missing"); 1840 1841 if (reachable(dst)) { 1842 movl(as_Address(dst), src); 1843 } else { 1844 lea(rscratch, dst); 1845 movl(Address(rscratch, 0), src); 1846 } 1847 } 1848 1849 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 1850 if (reachable(src)) { 1851 movl(dst, as_Address(src)); 1852 } else { 1853 lea(dst, src); 1854 movl(dst, Address(dst, 0)); 1855 } 1856 } 1857 1858 // C++ bool manipulation 1859 1860 void MacroAssembler::movbool(Register dst, Address src) { 1861 if(sizeof(bool) == 1) 1862 movb(dst, src); 1863 else if(sizeof(bool) == 2) 1864 movw(dst, src); 1865 else if(sizeof(bool) == 4) 1866 movl(dst, src); 1867 else 1868 // unsupported 1869 ShouldNotReachHere(); 1870 } 1871 1872 void MacroAssembler::movbool(Address dst, bool boolconst) { 1873 if(sizeof(bool) == 1) 1874 movb(dst, (int) boolconst); 1875 else if(sizeof(bool) == 2) 1876 movw(dst, (int) boolconst); 1877 else if(sizeof(bool) == 4) 1878 movl(dst, (int) boolconst); 1879 else 1880 // unsupported 1881 ShouldNotReachHere(); 1882 } 1883 1884 void MacroAssembler::movbool(Address dst, Register src) { 1885 if(sizeof(bool) == 1) 1886 movb(dst, src); 1887 else if(sizeof(bool) == 2) 1888 movw(dst, src); 1889 else if(sizeof(bool) == 4) 1890 movl(dst, src); 1891 else 1892 // unsupported 1893 ShouldNotReachHere(); 1894 } 1895 1896 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1897 assert(rscratch != noreg || always_reachable(src), "missing"); 1898 1899 if (reachable(src)) { 1900 movdl(dst, as_Address(src)); 1901 } else { 1902 lea(rscratch, src); 1903 movdl(dst, Address(rscratch, 0)); 1904 } 1905 } 1906 1907 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 1908 assert(rscratch != noreg || always_reachable(src), "missing"); 1909 1910 if (reachable(src)) { 1911 movq(dst, as_Address(src)); 1912 } else { 1913 lea(rscratch, src); 1914 movq(dst, Address(rscratch, 0)); 1915 } 1916 } 1917 1918 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1919 assert(rscratch != noreg || always_reachable(src), "missing"); 1920 1921 if (reachable(src)) { 1922 if (UseXmmLoadAndClearUpper) { 1923 movsd (dst, as_Address(src)); 1924 } else { 1925 movlpd(dst, as_Address(src)); 1926 } 1927 } else { 1928 lea(rscratch, src); 1929 if (UseXmmLoadAndClearUpper) { 1930 movsd (dst, Address(rscratch, 0)); 1931 } else { 1932 movlpd(dst, Address(rscratch, 0)); 1933 } 1934 } 1935 } 1936 1937 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 1938 assert(rscratch != noreg || always_reachable(src), "missing"); 1939 1940 if (reachable(src)) { 1941 movss(dst, as_Address(src)); 1942 } else { 1943 lea(rscratch, src); 1944 movss(dst, Address(rscratch, 0)); 1945 } 1946 } 1947 1948 void MacroAssembler::movptr(Register dst, Register src) { 1949 movq(dst, src); 1950 } 1951 1952 void MacroAssembler::movptr(Register dst, Address src) { 
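  // Descriptive note: the movptr family emits pointer-sized (64-bit) moves so
  // call sites do not hard-code the operand width; the intptr_t overload below
  // additionally picks the shortest encoding for the immediate (movl when the
  // value fits in unsigned 32 bits, a sign-extended movq when it fits in
  // signed 32 bits, mov64 otherwise).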
1953 movq(dst, src); 1954 } 1955 1956 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 1957 void MacroAssembler::movptr(Register dst, intptr_t src) { 1958 if (is_uimm32(src)) { 1959 movl(dst, checked_cast<uint32_t>(src)); 1960 } else if (is_simm32(src)) { 1961 movq(dst, checked_cast<int32_t>(src)); 1962 } else { 1963 mov64(dst, src); 1964 } 1965 } 1966 1967 void MacroAssembler::movptr(Address dst, Register src) { 1968 movq(dst, src); 1969 } 1970 1971 void MacroAssembler::movptr(Address dst, int32_t src) { 1972 movslq(dst, src); 1973 } 1974 1975 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 1976 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1977 Assembler::movdqu(dst, src); 1978 } 1979 1980 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 1981 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1982 Assembler::movdqu(dst, src); 1983 } 1984 1985 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 1986 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1987 Assembler::movdqu(dst, src); 1988 } 1989 1990 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 1991 assert(rscratch != noreg || always_reachable(src), "missing"); 1992 1993 if (reachable(src)) { 1994 movdqu(dst, as_Address(src)); 1995 } else { 1996 lea(rscratch, src); 1997 movdqu(dst, Address(rscratch, 0)); 1998 } 1999 } 2000 2001 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2002 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2003 Assembler::vmovdqu(dst, src); 2004 } 2005 2006 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2007 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2008 Assembler::vmovdqu(dst, src); 2009 } 2010 2011 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2012 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2013 Assembler::vmovdqu(dst, src); 2014 } 2015 2016 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2017 assert(rscratch != noreg || always_reachable(src), "missing"); 2018 2019 if (reachable(src)) { 2020 vmovdqu(dst, as_Address(src)); 2021 } 2022 else { 2023 lea(rscratch, src); 2024 vmovdqu(dst, Address(rscratch, 0)); 2025 } 2026 } 2027 2028 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2029 assert(rscratch != noreg || always_reachable(src), "missing"); 2030 2031 if (vector_len == AVX_512bit) { 2032 evmovdquq(dst, src, AVX_512bit, rscratch); 2033 } else if (vector_len == AVX_256bit) { 2034 vmovdqu(dst, src, rscratch); 2035 } else { 2036 movdqu(dst, src, rscratch); 2037 } 2038 } 2039 2040 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) { 2041 if (vector_len == AVX_512bit) { 2042 evmovdquq(dst, src, AVX_512bit); 2043 } else if (vector_len == AVX_256bit) { 2044 vmovdqu(dst, src); 2045 } else { 2046 movdqu(dst, src); 2047 } 2048 } 2049 2050 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) { 2051 if (vector_len == AVX_512bit) { 2052 evmovdquq(dst, src, AVX_512bit); 2053 } else if (vector_len == AVX_256bit) { 2054 vmovdqu(dst, src); 2055 } else { 2056 movdqu(dst, src); 2057 } 2058 } 
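// The vector_len-dispatching vmovdqu overloads above and below route to the
// narrowest matching encoding: evmovdquq for AVX_512bit, vmovdqu for
// AVX_256bit, plain movdqu otherwise. Illustrative 32-byte copy through these
// helpers (registers and addresses chosen arbitrarily for the sketch):
//
//   vmovdqu(xmm0, Address(rsi, 0), Assembler::AVX_256bit);   // 32-byte load
//   vmovdqu(Address(rdi, 0), xmm0, Assembler::AVX_256bit);   // 32-byte store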
2059 2060 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) { 2061 if (vector_len == AVX_512bit) { 2062 evmovdquq(dst, src, AVX_512bit); 2063 } else if (vector_len == AVX_256bit) { 2064 vmovdqu(dst, src); 2065 } else { 2066 movdqu(dst, src); 2067 } 2068 } 2069 2070 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2071 assert(rscratch != noreg || always_reachable(src), "missing"); 2072 2073 if (reachable(src)) { 2074 vmovdqa(dst, as_Address(src)); 2075 } 2076 else { 2077 lea(rscratch, src); 2078 vmovdqa(dst, Address(rscratch, 0)); 2079 } 2080 } 2081 2082 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2083 assert(rscratch != noreg || always_reachable(src), "missing"); 2084 2085 if (vector_len == AVX_512bit) { 2086 evmovdqaq(dst, src, AVX_512bit, rscratch); 2087 } else if (vector_len == AVX_256bit) { 2088 vmovdqa(dst, src, rscratch); 2089 } else { 2090 movdqa(dst, src, rscratch); 2091 } 2092 } 2093 2094 void MacroAssembler::kmov(KRegister dst, Address src) { 2095 if (VM_Version::supports_avx512bw()) { 2096 kmovql(dst, src); 2097 } else { 2098 assert(VM_Version::supports_evex(), ""); 2099 kmovwl(dst, src); 2100 } 2101 } 2102 2103 void MacroAssembler::kmov(Address dst, KRegister src) { 2104 if (VM_Version::supports_avx512bw()) { 2105 kmovql(dst, src); 2106 } else { 2107 assert(VM_Version::supports_evex(), ""); 2108 kmovwl(dst, src); 2109 } 2110 } 2111 2112 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2113 if (VM_Version::supports_avx512bw()) { 2114 kmovql(dst, src); 2115 } else { 2116 assert(VM_Version::supports_evex(), ""); 2117 kmovwl(dst, src); 2118 } 2119 } 2120 2121 void MacroAssembler::kmov(Register dst, KRegister src) { 2122 if (VM_Version::supports_avx512bw()) { 2123 kmovql(dst, src); 2124 } else { 2125 assert(VM_Version::supports_evex(), ""); 2126 kmovwl(dst, src); 2127 } 2128 } 2129 2130 void MacroAssembler::kmov(KRegister dst, Register src) { 2131 if (VM_Version::supports_avx512bw()) { 2132 kmovql(dst, src); 2133 } else { 2134 assert(VM_Version::supports_evex(), ""); 2135 kmovwl(dst, src); 2136 } 2137 } 2138 2139 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2140 assert(rscratch != noreg || always_reachable(src), "missing"); 2141 2142 if (reachable(src)) { 2143 kmovql(dst, as_Address(src)); 2144 } else { 2145 lea(rscratch, src); 2146 kmovql(dst, Address(rscratch, 0)); 2147 } 2148 } 2149 2150 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2151 assert(rscratch != noreg || always_reachable(src), "missing"); 2152 2153 if (reachable(src)) { 2154 kmovwl(dst, as_Address(src)); 2155 } else { 2156 lea(rscratch, src); 2157 kmovwl(dst, Address(rscratch, 0)); 2158 } 2159 } 2160 2161 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2162 int vector_len, Register rscratch) { 2163 assert(rscratch != noreg || always_reachable(src), "missing"); 2164 2165 if (reachable(src)) { 2166 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2167 } else { 2168 lea(rscratch, src); 2169 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2170 } 2171 } 2172 2173 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2174 int vector_len, Register rscratch) { 2175 assert(rscratch != noreg || always_reachable(src), "missing"); 2176 2177 if (reachable(src)) { 2178 Assembler::evmovdquw(dst, 
mask, as_Address(src), merge, vector_len); 2179 } else { 2180 lea(rscratch, src); 2181 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2182 } 2183 } 2184 2185 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2186 assert(rscratch != noreg || always_reachable(src), "missing"); 2187 2188 if (reachable(src)) { 2189 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2190 } else { 2191 lea(rscratch, src); 2192 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2193 } 2194 } 2195 2196 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2197 assert(rscratch != noreg || always_reachable(src), "missing"); 2198 2199 if (reachable(src)) { 2200 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2201 } else { 2202 lea(rscratch, src); 2203 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2204 } 2205 } 2206 2207 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2208 assert(rscratch != noreg || always_reachable(src), "missing"); 2209 2210 if (reachable(src)) { 2211 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2212 } else { 2213 lea(rscratch, src); 2214 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2215 } 2216 } 2217 2218 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2219 assert(rscratch != noreg || always_reachable(src), "missing"); 2220 2221 if (reachable(src)) { 2222 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len); 2223 } else { 2224 lea(rscratch, src); 2225 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len); 2226 } 2227 } 2228 2229 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2230 assert(rscratch != noreg || always_reachable(src), "missing"); 2231 2232 if (reachable(src)) { 2233 Assembler::evmovdqaq(dst, as_Address(src), vector_len); 2234 } else { 2235 lea(rscratch, src); 2236 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len); 2237 } 2238 } 2239 2240 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2241 assert(rscratch != noreg || always_reachable(src), "missing"); 2242 2243 if (reachable(src)) { 2244 Assembler::movapd(dst, as_Address(src)); 2245 } else { 2246 lea(rscratch, src); 2247 Assembler::movapd(dst, Address(rscratch, 0)); 2248 } 2249 } 2250 2251 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2252 assert(rscratch != noreg || always_reachable(src), "missing"); 2253 2254 if (reachable(src)) { 2255 Assembler::movdqa(dst, as_Address(src)); 2256 } else { 2257 lea(rscratch, src); 2258 Assembler::movdqa(dst, Address(rscratch, 0)); 2259 } 2260 } 2261 2262 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2263 assert(rscratch != noreg || always_reachable(src), "missing"); 2264 2265 if (reachable(src)) { 2266 Assembler::movsd(dst, as_Address(src)); 2267 } else { 2268 lea(rscratch, src); 2269 Assembler::movsd(dst, Address(rscratch, 0)); 2270 } 2271 } 2272 2273 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2274 assert(rscratch != noreg || always_reachable(src), "missing"); 2275 2276 if (reachable(src)) { 2277 
Assembler::movss(dst, as_Address(src)); 2278 } else { 2279 lea(rscratch, src); 2280 Assembler::movss(dst, Address(rscratch, 0)); 2281 } 2282 } 2283 2284 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2285 assert(rscratch != noreg || always_reachable(src), "missing"); 2286 2287 if (reachable(src)) { 2288 Assembler::movddup(dst, as_Address(src)); 2289 } else { 2290 lea(rscratch, src); 2291 Assembler::movddup(dst, Address(rscratch, 0)); 2292 } 2293 } 2294 2295 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2296 assert(rscratch != noreg || always_reachable(src), "missing"); 2297 2298 if (reachable(src)) { 2299 Assembler::vmovddup(dst, as_Address(src), vector_len); 2300 } else { 2301 lea(rscratch, src); 2302 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2303 } 2304 } 2305 2306 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2307 assert(rscratch != noreg || always_reachable(src), "missing"); 2308 2309 if (reachable(src)) { 2310 Assembler::mulsd(dst, as_Address(src)); 2311 } else { 2312 lea(rscratch, src); 2313 Assembler::mulsd(dst, Address(rscratch, 0)); 2314 } 2315 } 2316 2317 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2318 assert(rscratch != noreg || always_reachable(src), "missing"); 2319 2320 if (reachable(src)) { 2321 Assembler::mulss(dst, as_Address(src)); 2322 } else { 2323 lea(rscratch, src); 2324 Assembler::mulss(dst, Address(rscratch, 0)); 2325 } 2326 } 2327 2328 void MacroAssembler::null_check(Register reg, int offset) { 2329 if (needs_explicit_null_check(offset)) { 2330 // provoke OS null exception if reg is null by 2331 // accessing M[reg] w/o changing any (non-CC) registers 2332 // NOTE: cmpl is plenty here to provoke a segv 2333 cmpptr(rax, Address(reg, 0)); 2334 // Note: should probably use testl(rax, Address(reg, 0)); 2335 // may be shorter code (however, this version of 2336 // testl needs to be implemented first) 2337 } else { 2338 // nothing to do, (later) access of M[reg + offset] 2339 // will provoke OS null exception if reg is null 2340 } 2341 } 2342 2343 void MacroAssembler::os_breakpoint() { 2344 // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability 2345 // (e.g., MSVC can't call ps() otherwise) 2346 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 2347 } 2348 2349 void MacroAssembler::unimplemented(const char* what) { 2350 const char* buf = nullptr; 2351 { 2352 ResourceMark rm; 2353 stringStream ss; 2354 ss.print("unimplemented: %s", what); 2355 buf = code_string(ss.as_string()); 2356 } 2357 stop(buf); 2358 } 2359 2360 #define XSTATE_BV 0x200 2361 2362 void MacroAssembler::pop_CPU_state() { 2363 pop_FPU_state(); 2364 pop_IU_state(); 2365 } 2366 2367 void MacroAssembler::pop_FPU_state() { 2368 fxrstor(Address(rsp, 0)); 2369 addptr(rsp, FPUStateSizeInWords * wordSize); 2370 } 2371 2372 void MacroAssembler::pop_IU_state() { 2373 popa(); 2374 addq(rsp, 8); 2375 popf(); 2376 } 2377 2378 // Save Integer and Float state 2379 // Warning: Stack must be 16 byte aligned (64bit) 2380 void MacroAssembler::push_CPU_state() { 2381 push_IU_state(); 2382 push_FPU_state(); 2383 } 2384 2385 void MacroAssembler::push_FPU_state() { 2386 subptr(rsp, FPUStateSizeInWords * wordSize); 2387 fxsave(Address(rsp, 0)); 2388 } 2389 2390 void MacroAssembler::push_IU_state() { 2391 // Push flags first because pusha kills them 2392 pushf(); 2393 // Make sure rsp
stays 16-byte aligned 2394 subq(rsp, 8); 2395 pusha(); 2396 } 2397 2398 void MacroAssembler::push_cont_fastpath() { 2399 if (!Continuations::enabled()) return; 2400 2401 Label L_done; 2402 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset())); 2403 jccb(Assembler::belowEqual, L_done); 2404 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp); 2405 bind(L_done); 2406 } 2407 2408 void MacroAssembler::pop_cont_fastpath() { 2409 if (!Continuations::enabled()) return; 2410 2411 Label L_done; 2412 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset())); 2413 jccb(Assembler::below, L_done); 2414 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0); 2415 bind(L_done); 2416 } 2417 2418 void MacroAssembler::inc_held_monitor_count() { 2419 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 2420 } 2421 2422 void MacroAssembler::dec_held_monitor_count() { 2423 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 2424 } 2425 2426 #ifdef ASSERT 2427 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 2428 Label no_cont; 2429 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 2430 testl(cont, cont); 2431 jcc(Assembler::zero, no_cont); 2432 stop(name); 2433 bind(no_cont); 2434 } 2435 #endif 2436 2437 void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // determine java_thread register 2438 // we must set sp to zero to clear frame 2439 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 2440 // must clear fp, so that compiled frames are not confused; it is 2441 // possible that we need it only for debugging 2442 if (clear_fp) { 2443 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 2444 } 2445 // Always clear the pc because it could have been set by make_walkable() 2446 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 2447 vzeroupper(); 2448 } 2449 2450 void MacroAssembler::round_to(Register reg, int modulus) { 2451 addptr(reg, modulus - 1); 2452 andptr(reg, -modulus); 2453 } 2454 2455 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) { 2456 if (at_return) { 2457 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 2458 // we may safely use rsp instead to perform the stack watermark check. 2459 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset())); 2460 jcc(Assembler::above, slow_path); 2461 return; 2462 } 2463 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 2464 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 2465 } 2466 2467 // Calls to C land 2468 // 2469 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 2470 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 2471 // has to be reset to 0. This is required to allow proper stack traversal. 
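//
// An illustrative anchor protocol around a call into C code (sketch only;
// "the_pc", "entry_point" and the scratch register are placeholders, real
// callers differ in which registers and pc they record):
//
//   __ set_last_Java_frame(rsp, noreg, the_pc, rscratch1);  // record sp and pc
//   __ call(RuntimeAddress(entry_point));                    // off to C land
//   __ reset_last_Java_frame(true);                          // clear sp, fp and pc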
2472 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2473 Register last_java_fp, 2474 address last_java_pc, 2475 Register rscratch) { 2476 vzeroupper(); 2477 // determine last_java_sp register 2478 if (!last_java_sp->is_valid()) { 2479 last_java_sp = rsp; 2480 } 2481 // last_java_fp is optional 2482 if (last_java_fp->is_valid()) { 2483 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 2484 } 2485 // last_java_pc is optional 2486 if (last_java_pc != nullptr) { 2487 Address java_pc(r15_thread, 2488 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 2489 lea(java_pc, InternalAddress(last_java_pc), rscratch); 2490 } 2491 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 2492 } 2493 2494 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2495 Register last_java_fp, 2496 Label &L, 2497 Register scratch) { 2498 lea(scratch, L); 2499 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch); 2500 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch); 2501 } 2502 2503 void MacroAssembler::shlptr(Register dst, int imm8) { 2504 shlq(dst, imm8); 2505 } 2506 2507 void MacroAssembler::shrptr(Register dst, int imm8) { 2508 shrq(dst, imm8); 2509 } 2510 2511 void MacroAssembler::sign_extend_byte(Register reg) { 2512 movsbl(reg, reg); // movsxb 2513 } 2514 2515 void MacroAssembler::sign_extend_short(Register reg) { 2516 movswl(reg, reg); // movsxw 2517 } 2518 2519 void MacroAssembler::testl(Address dst, int32_t imm32) { 2520 if (imm32 >= 0 && is8bit(imm32)) { 2521 testb(dst, imm32); 2522 } else { 2523 Assembler::testl(dst, imm32); 2524 } 2525 } 2526 2527 void MacroAssembler::testl(Register dst, int32_t imm32) { 2528 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 2529 testb(dst, imm32); 2530 } else { 2531 Assembler::testl(dst, imm32); 2532 } 2533 } 2534 2535 void MacroAssembler::testl(Register dst, AddressLiteral src) { 2536 assert(always_reachable(src), "Address should be reachable"); 2537 testl(dst, as_Address(src)); 2538 } 2539 2540 void MacroAssembler::testq(Address dst, int32_t imm32) { 2541 if (imm32 >= 0) { 2542 testl(dst, imm32); 2543 } else { 2544 Assembler::testq(dst, imm32); 2545 } 2546 } 2547 2548 void MacroAssembler::testq(Register dst, int32_t imm32) { 2549 if (imm32 >= 0) { 2550 testl(dst, imm32); 2551 } else { 2552 Assembler::testq(dst, imm32); 2553 } 2554 } 2555 2556 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 2557 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2558 Assembler::pcmpeqb(dst, src); 2559 } 2560 2561 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 2562 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2563 Assembler::pcmpeqw(dst, src); 2564 } 2565 2566 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 2567 assert((dst->encoding() < 16),"XMM register should be 0-15"); 2568 Assembler::pcmpestri(dst, src, imm8); 2569 } 2570 2571 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 2572 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2573 Assembler::pcmpestri(dst, src, imm8); 2574 } 2575 2576 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 2577 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM 
register should be 0-15"); 2578 Assembler::pmovzxbw(dst, src); 2579 } 2580 2581 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 2582 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2583 Assembler::pmovzxbw(dst, src); 2584 } 2585 2586 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 2587 assert((src->encoding() < 16),"XMM register should be 0-15"); 2588 Assembler::pmovmskb(dst, src); 2589 } 2590 2591 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 2592 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2593 Assembler::ptest(dst, src); 2594 } 2595 2596 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2597 assert(rscratch != noreg || always_reachable(src), "missing"); 2598 2599 if (reachable(src)) { 2600 Assembler::sqrtss(dst, as_Address(src)); 2601 } else { 2602 lea(rscratch, src); 2603 Assembler::sqrtss(dst, Address(rscratch, 0)); 2604 } 2605 } 2606 2607 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2608 assert(rscratch != noreg || always_reachable(src), "missing"); 2609 2610 if (reachable(src)) { 2611 Assembler::subsd(dst, as_Address(src)); 2612 } else { 2613 lea(rscratch, src); 2614 Assembler::subsd(dst, Address(rscratch, 0)); 2615 } 2616 } 2617 2618 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 2619 assert(rscratch != noreg || always_reachable(src), "missing"); 2620 2621 if (reachable(src)) { 2622 Assembler::roundsd(dst, as_Address(src), rmode); 2623 } else { 2624 lea(rscratch, src); 2625 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 2626 } 2627 } 2628 2629 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2630 assert(rscratch != noreg || always_reachable(src), "missing"); 2631 2632 if (reachable(src)) { 2633 Assembler::subss(dst, as_Address(src)); 2634 } else { 2635 lea(rscratch, src); 2636 Assembler::subss(dst, Address(rscratch, 0)); 2637 } 2638 } 2639 2640 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2641 assert(rscratch != noreg || always_reachable(src), "missing"); 2642 2643 if (reachable(src)) { 2644 Assembler::ucomisd(dst, as_Address(src)); 2645 } else { 2646 lea(rscratch, src); 2647 Assembler::ucomisd(dst, Address(rscratch, 0)); 2648 } 2649 } 2650 2651 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2652 assert(rscratch != noreg || always_reachable(src), "missing"); 2653 2654 if (reachable(src)) { 2655 Assembler::ucomiss(dst, as_Address(src)); 2656 } else { 2657 lea(rscratch, src); 2658 Assembler::ucomiss(dst, Address(rscratch, 0)); 2659 } 2660 } 2661 2662 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2663 assert(rscratch != noreg || always_reachable(src), "missing"); 2664 2665 // Used in sign-bit flipping with aligned address. 
2666 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2667 2668 if (UseAVX > 2 && 2669 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2670 (dst->encoding() >= 16)) { 2671 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2672 } else if (reachable(src)) { 2673 Assembler::xorpd(dst, as_Address(src)); 2674 } else { 2675 lea(rscratch, src); 2676 Assembler::xorpd(dst, Address(rscratch, 0)); 2677 } 2678 } 2679 2680 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 2681 if (UseAVX > 2 && 2682 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2683 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2684 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2685 } else { 2686 Assembler::xorpd(dst, src); 2687 } 2688 } 2689 2690 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 2691 if (UseAVX > 2 && 2692 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2693 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2694 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2695 } else { 2696 Assembler::xorps(dst, src); 2697 } 2698 } 2699 2700 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 2701 assert(rscratch != noreg || always_reachable(src), "missing"); 2702 2703 // Used in sign-bit flipping with aligned address. 2704 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2705 2706 if (UseAVX > 2 && 2707 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2708 (dst->encoding() >= 16)) { 2709 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2710 } else if (reachable(src)) { 2711 Assembler::xorps(dst, as_Address(src)); 2712 } else { 2713 lea(rscratch, src); 2714 Assembler::xorps(dst, Address(rscratch, 0)); 2715 } 2716 } 2717 2718 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 2719 assert(rscratch != noreg || always_reachable(src), "missing"); 2720 2721 // Used in sign-bit flipping with aligned address. 
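  // (same caveat as xorpd/xorps above: the 16-byte alignment is only required
  // when AVX is unavailable and the SSE memory-operand form is used)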
2722 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 2723 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 2724 if (reachable(src)) { 2725 Assembler::pshufb(dst, as_Address(src)); 2726 } else { 2727 lea(rscratch, src); 2728 Assembler::pshufb(dst, Address(rscratch, 0)); 2729 } 2730 } 2731 2732 // AVX 3-operands instructions 2733 2734 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2735 assert(rscratch != noreg || always_reachable(src), "missing"); 2736 2737 if (reachable(src)) { 2738 vaddsd(dst, nds, as_Address(src)); 2739 } else { 2740 lea(rscratch, src); 2741 vaddsd(dst, nds, Address(rscratch, 0)); 2742 } 2743 } 2744 2745 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2746 assert(rscratch != noreg || always_reachable(src), "missing"); 2747 2748 if (reachable(src)) { 2749 vaddss(dst, nds, as_Address(src)); 2750 } else { 2751 lea(rscratch, src); 2752 vaddss(dst, nds, Address(rscratch, 0)); 2753 } 2754 } 2755 2756 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2757 assert(UseAVX > 0, "requires some form of AVX"); 2758 assert(rscratch != noreg || always_reachable(src), "missing"); 2759 2760 if (reachable(src)) { 2761 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 2762 } else { 2763 lea(rscratch, src); 2764 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 2765 } 2766 } 2767 2768 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2769 assert(UseAVX > 0, "requires some form of AVX"); 2770 assert(rscratch != noreg || always_reachable(src), "missing"); 2771 2772 if (reachable(src)) { 2773 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 2774 } else { 2775 lea(rscratch, src); 2776 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 2777 } 2778 } 2779 2780 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2781 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2782 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2783 2784 vandps(dst, nds, negate_field, vector_len, rscratch); 2785 } 2786 2787 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2788 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2789 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2790 2791 vandpd(dst, nds, negate_field, vector_len, rscratch); 2792 } 2793 2794 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2795 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2796 Assembler::vpaddb(dst, nds, src, vector_len); 2797 } 2798 2799 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2800 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2801 Assembler::vpaddb(dst, nds, src, vector_len); 2802 } 2803 2804 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2805 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2806 Assembler::vpaddw(dst, nds, src, vector_len); 2807 } 2808 2809 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2810 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2811 Assembler::vpaddw(dst, nds, src, vector_len); 2812 } 2813 2814 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2815 assert(rscratch != noreg || always_reachable(src), "missing"); 2816 2817 if (reachable(src)) { 2818 Assembler::vpand(dst, nds, as_Address(src), vector_len); 2819 } else { 2820 lea(rscratch, src); 2821 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 2822 } 2823 } 2824 2825 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2826 assert(rscratch != noreg || always_reachable(src), "missing"); 2827 2828 if (reachable(src)) { 2829 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 2830 } else { 2831 lea(rscratch, src); 2832 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 2833 } 2834 } 2835 2836 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2837 assert(rscratch != noreg || always_reachable(src), "missing"); 2838 2839 if (reachable(src)) { 2840 Assembler::vbroadcasti128(dst, as_Address(src), vector_len); 2841 } else { 2842 lea(rscratch, src); 2843 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len); 2844 } 2845 } 2846 2847 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2848 assert(rscratch != noreg || always_reachable(src), "missing"); 2849 2850 if (reachable(src)) { 2851 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 2852 } else { 2853 lea(rscratch, src); 2854 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 2855 } 2856 } 2857 2858 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2859 assert(rscratch != noreg || always_reachable(src), "missing"); 2860 2861 if (reachable(src)) { 2862 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 2863 } else { 2864 lea(rscratch, src); 2865 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 2866 } 2867 } 2868 2869 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2870 assert(rscratch != noreg || always_reachable(src), "missing"); 2871 2872 if (reachable(src)) { 2873 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 2874 } else { 2875 lea(rscratch, src); 2876 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 2877 } 2878 } 2879 2880 // Vector float blend 2881 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2882 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2883 // WARN: Allow dst == (src1|src2), mask == scratch 2884 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2885 bool scratch_available = scratch != xnoreg 
&& scratch != src1 && scratch != src2 && scratch != dst; 2886 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2887 if (blend_emulation && scratch_available && dst_available) { 2888 if (compute_mask) { 2889 vpsrad(scratch, mask, 32, vector_len); 2890 mask = scratch; 2891 } 2892 if (dst == src1) { 2893 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1 2894 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2895 } else { 2896 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2897 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1 2898 } 2899 vpor(dst, dst, scratch, vector_len); 2900 } else { 2901 Assembler::vblendvps(dst, src1, src2, mask, vector_len); 2902 } 2903 } 2904 2905 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2906 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2907 // WARN: Allow dst == (src1|src2), mask == scratch 2908 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2909 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask); 2910 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2911 if (blend_emulation && scratch_available && dst_available) { 2912 if (compute_mask) { 2913 vpxor(scratch, scratch, scratch, vector_len); 2914 vpcmpgtq(scratch, scratch, mask, vector_len); 2915 mask = scratch; 2916 } 2917 if (dst == src1) { 2918 vpandn(dst, mask, src1, vector_len); // if mask == 0, src 2919 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2920 } else { 2921 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2922 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src 2923 } 2924 vpor(dst, dst, scratch, vector_len); 2925 } else { 2926 Assembler::vblendvpd(dst, src1, src2, mask, vector_len); 2927 } 2928 } 2929 2930 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2931 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2932 Assembler::vpcmpeqb(dst, nds, src, vector_len); 2933 } 2934 2935 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 2936 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2937 Assembler::vpcmpeqb(dst, src1, src2, vector_len); 2938 } 2939 2940 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2941 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2942 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2943 } 2944 2945 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2946 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2947 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2948 } 2949 2950 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2951 assert(rscratch != noreg || always_reachable(src), "missing"); 2952 2953 if (reachable(src)) { 2954 
Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 2955 } else { 2956 lea(rscratch, src); 2957 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 2958 } 2959 } 2960 2961 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2962 int comparison, bool is_signed, int vector_len, Register rscratch) { 2963 assert(rscratch != noreg || always_reachable(src), "missing"); 2964 2965 if (reachable(src)) { 2966 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2967 } else { 2968 lea(rscratch, src); 2969 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2970 } 2971 } 2972 2973 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2974 int comparison, bool is_signed, int vector_len, Register rscratch) { 2975 assert(rscratch != noreg || always_reachable(src), "missing"); 2976 2977 if (reachable(src)) { 2978 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2979 } else { 2980 lea(rscratch, src); 2981 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2982 } 2983 } 2984 2985 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2986 int comparison, bool is_signed, int vector_len, Register rscratch) { 2987 assert(rscratch != noreg || always_reachable(src), "missing"); 2988 2989 if (reachable(src)) { 2990 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2991 } else { 2992 lea(rscratch, src); 2993 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2994 } 2995 } 2996 2997 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2998 int comparison, bool is_signed, int vector_len, Register rscratch) { 2999 assert(rscratch != noreg || always_reachable(src), "missing"); 3000 3001 if (reachable(src)) { 3002 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3003 } else { 3004 lea(rscratch, src); 3005 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3006 } 3007 } 3008 3009 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3010 if (width == Assembler::Q) { 3011 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3012 } else { 3013 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3014 } 3015 } 3016 3017 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3018 int eq_cond_enc = 0x29; 3019 int gt_cond_enc = 0x37; 3020 if (width != Assembler::Q) { 3021 eq_cond_enc = 0x74 + width; 3022 gt_cond_enc = 0x64 + width; 3023 } 3024 switch (cond) { 3025 case eq: 3026 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3027 break; 3028 case neq: 3029 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3030 vallones(xtmp, vector_len); 3031 vpxor(dst, xtmp, dst, vector_len); 3032 break; 3033 case le: 3034 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3035 vallones(xtmp, vector_len); 3036 vpxor(dst, xtmp, dst, vector_len); 3037 break; 3038 case nlt: 3039 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3040 vallones(xtmp, vector_len); 3041 vpxor(dst, xtmp, dst, 
vector_len); 3042 break; 3043 case lt: 3044 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3045 break; 3046 case nle: 3047 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3048 break; 3049 default: 3050 assert(false, "Should not reach here"); 3051 } 3052 } 3053 3054 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3055 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3056 Assembler::vpmovzxbw(dst, src, vector_len); 3057 } 3058 3059 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3060 assert((src->encoding() < 16),"XMM register should be 0-15"); 3061 Assembler::vpmovmskb(dst, src, vector_len); 3062 } 3063 3064 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3065 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3066 Assembler::vpmullw(dst, nds, src, vector_len); 3067 } 3068 3069 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3070 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3071 Assembler::vpmullw(dst, nds, src, vector_len); 3072 } 3073 3074 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3075 assert((UseAVX > 0), "AVX support is needed"); 3076 assert(rscratch != noreg || always_reachable(src), "missing"); 3077 3078 if (reachable(src)) { 3079 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3080 } else { 3081 lea(rscratch, src); 3082 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3083 } 3084 } 3085 3086 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3087 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3088 Assembler::vpsubb(dst, nds, src, vector_len); 3089 } 3090 3091 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3092 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3093 Assembler::vpsubb(dst, nds, src, vector_len); 3094 } 3095 3096 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3097 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3098 Assembler::vpsubw(dst, nds, src, vector_len); 3099 } 3100 3101 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3102 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3103 Assembler::vpsubw(dst, nds, src, vector_len); 3104 } 3105 3106 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3107 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3108 Assembler::vpsraw(dst, nds, shift, vector_len); 3109 } 3110 3111 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3112 assert(((dst->encoding() < 16 && nds->encoding() < 16) || 
VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3113 Assembler::vpsraw(dst, nds, shift, vector_len); 3114 } 3115 3116 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3117 assert(UseAVX > 2,""); 3118 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3119 vector_len = 2; 3120 } 3121 Assembler::evpsraq(dst, nds, shift, vector_len); 3122 } 3123 3124 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3125 assert(UseAVX > 2,""); 3126 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3127 vector_len = 2; 3128 } 3129 Assembler::evpsraq(dst, nds, shift, vector_len); 3130 } 3131 3132 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3133 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3134 Assembler::vpsrlw(dst, nds, shift, vector_len); 3135 } 3136 3137 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3138 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3139 Assembler::vpsrlw(dst, nds, shift, vector_len); 3140 } 3141 3142 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3143 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3144 Assembler::vpsllw(dst, nds, shift, vector_len); 3145 } 3146 3147 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3148 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3149 Assembler::vpsllw(dst, nds, shift, vector_len); 3150 } 3151 3152 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3153 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3154 Assembler::vptest(dst, src); 3155 } 3156 3157 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3158 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3159 Assembler::punpcklbw(dst, src); 3160 } 3161 3162 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3163 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3164 Assembler::pshufd(dst, src, mode); 3165 } 3166 3167 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3168 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3169 Assembler::pshuflw(dst, src, mode); 3170 } 3171 3172 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3173 assert(rscratch != noreg || always_reachable(src), "missing"); 3174 3175 if (reachable(src)) { 3176 vandpd(dst, nds, as_Address(src), vector_len); 3177 } else { 3178 lea(rscratch, src); 3179 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3180 } 3181 } 3182 3183 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3184 assert(rscratch != noreg || always_reachable(src), "missing"); 3185 3186 if (reachable(src)) { 3187 vandps(dst, nds, as_Address(src), vector_len); 
3188 } else { 3189 lea(rscratch, src); 3190 vandps(dst, nds, Address(rscratch, 0), vector_len); 3191 } 3192 } 3193 3194 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3195 bool merge, int vector_len, Register rscratch) { 3196 assert(rscratch != noreg || always_reachable(src), "missing"); 3197 3198 if (reachable(src)) { 3199 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3200 } else { 3201 lea(rscratch, src); 3202 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3203 } 3204 } 3205 3206 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3207 assert(rscratch != noreg || always_reachable(src), "missing"); 3208 3209 if (reachable(src)) { 3210 vdivsd(dst, nds, as_Address(src)); 3211 } else { 3212 lea(rscratch, src); 3213 vdivsd(dst, nds, Address(rscratch, 0)); 3214 } 3215 } 3216 3217 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3218 assert(rscratch != noreg || always_reachable(src), "missing"); 3219 3220 if (reachable(src)) { 3221 vdivss(dst, nds, as_Address(src)); 3222 } else { 3223 lea(rscratch, src); 3224 vdivss(dst, nds, Address(rscratch, 0)); 3225 } 3226 } 3227 3228 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3229 assert(rscratch != noreg || always_reachable(src), "missing"); 3230 3231 if (reachable(src)) { 3232 vmulsd(dst, nds, as_Address(src)); 3233 } else { 3234 lea(rscratch, src); 3235 vmulsd(dst, nds, Address(rscratch, 0)); 3236 } 3237 } 3238 3239 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3240 assert(rscratch != noreg || always_reachable(src), "missing"); 3241 3242 if (reachable(src)) { 3243 vmulss(dst, nds, as_Address(src)); 3244 } else { 3245 lea(rscratch, src); 3246 vmulss(dst, nds, Address(rscratch, 0)); 3247 } 3248 } 3249 3250 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3251 assert(rscratch != noreg || always_reachable(src), "missing"); 3252 3253 if (reachable(src)) { 3254 vsubsd(dst, nds, as_Address(src)); 3255 } else { 3256 lea(rscratch, src); 3257 vsubsd(dst, nds, Address(rscratch, 0)); 3258 } 3259 } 3260 3261 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3262 assert(rscratch != noreg || always_reachable(src), "missing"); 3263 3264 if (reachable(src)) { 3265 vsubss(dst, nds, as_Address(src)); 3266 } else { 3267 lea(rscratch, src); 3268 vsubss(dst, nds, Address(rscratch, 0)); 3269 } 3270 } 3271 3272 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3273 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3274 assert(rscratch != noreg || always_reachable(src), "missing"); 3275 3276 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 3277 } 3278 3279 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3280 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3281 assert(rscratch != noreg || always_reachable(src), "missing"); 3282 3283 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 3284 } 3285 3286 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
int vector_len, Register rscratch) { 3287 assert(rscratch != noreg || always_reachable(src), "missing"); 3288 3289 if (reachable(src)) { 3290 vxorpd(dst, nds, as_Address(src), vector_len); 3291 } else { 3292 lea(rscratch, src); 3293 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 3294 } 3295 } 3296 3297 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3298 assert(rscratch != noreg || always_reachable(src), "missing"); 3299 3300 if (reachable(src)) { 3301 vxorps(dst, nds, as_Address(src), vector_len); 3302 } else { 3303 lea(rscratch, src); 3304 vxorps(dst, nds, Address(rscratch, 0), vector_len); 3305 } 3306 } 3307 3308 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3309 assert(rscratch != noreg || always_reachable(src), "missing"); 3310 3311 if (UseAVX > 1 || (vector_len < 1)) { 3312 if (reachable(src)) { 3313 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 3314 } else { 3315 lea(rscratch, src); 3316 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 3317 } 3318 } else { 3319 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 3320 } 3321 } 3322 3323 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3324 assert(rscratch != noreg || always_reachable(src), "missing"); 3325 3326 if (reachable(src)) { 3327 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 3328 } else { 3329 lea(rscratch, src); 3330 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 3331 } 3332 } 3333 3334 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 3335 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 3336 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 3337 // The inverted mask is sign-extended 3338 andptr(possibly_non_local, inverted_mask); 3339 } 3340 3341 void MacroAssembler::resolve_jobject(Register value, 3342 Register tmp) { 3343 Register thread = r15_thread; 3344 assert_different_registers(value, thread, tmp); 3345 Label done, tagged, weak_tagged; 3346 testptr(value, value); 3347 jcc(Assembler::zero, done); // Use null as-is. 3348 testptr(value, JNIHandles::tag_mask); // Test for tag. 3349 jcc(Assembler::notZero, tagged); 3350 3351 // Resolve local handle 3352 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp); 3353 verify_oop(value); 3354 jmp(done); 3355 3356 bind(tagged); 3357 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 3358 jcc(Assembler::notZero, weak_tagged); 3359 3360 // Resolve global handle 3361 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3362 verify_oop(value); 3363 jmp(done); 3364 3365 bind(weak_tagged); 3366 // Resolve jweak. 3367 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3368 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp); 3369 verify_oop(value); 3370 3371 bind(done); 3372 } 3373 3374 void MacroAssembler::resolve_global_jobject(Register value, 3375 Register tmp) { 3376 Register thread = r15_thread; 3377 assert_different_registers(value, thread, tmp); 3378 Label done; 3379 3380 testptr(value, value); 3381 jcc(Assembler::zero, done); // Use null as-is. 3382 3383 #ifdef ASSERT 3384 { 3385 Label valid_global_tag; 3386 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
3387 jcc(Assembler::notZero, valid_global_tag); 3388 stop("non global jobject using resolve_global_jobject"); 3389 bind(valid_global_tag); 3390 } 3391 #endif 3392 3393 // Resolve global handle 3394 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3395 verify_oop(value); 3396 3397 bind(done); 3398 } 3399 3400 void MacroAssembler::subptr(Register dst, int32_t imm32) { 3401 subq(dst, imm32); 3402 } 3403 3404 // Force generation of a 4 byte immediate value even if it fits into 8bit 3405 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 3406 subq_imm32(dst, imm32); 3407 } 3408 3409 void MacroAssembler::subptr(Register dst, Register src) { 3410 subq(dst, src); 3411 } 3412 3413 // C++ bool manipulation 3414 void MacroAssembler::testbool(Register dst) { 3415 if(sizeof(bool) == 1) 3416 testb(dst, 0xff); 3417 else if(sizeof(bool) == 2) { 3418 // testw implementation needed for two byte bools 3419 ShouldNotReachHere(); 3420 } else if(sizeof(bool) == 4) 3421 testl(dst, dst); 3422 else 3423 // unsupported 3424 ShouldNotReachHere(); 3425 } 3426 3427 void MacroAssembler::testptr(Register dst, Register src) { 3428 testq(dst, src); 3429 } 3430 3431 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3432 void MacroAssembler::tlab_allocate(Register obj, 3433 Register var_size_in_bytes, 3434 int con_size_in_bytes, 3435 Register t1, 3436 Register t2, 3437 Label& slow_case) { 3438 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3439 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 3440 } 3441 3442 RegSet MacroAssembler::call_clobbered_gp_registers() { 3443 RegSet regs; 3444 regs += RegSet::of(rax, rcx, rdx); 3445 #ifndef _WINDOWS 3446 regs += RegSet::of(rsi, rdi); 3447 #endif 3448 regs += RegSet::range(r8, r11); 3449 if (UseAPX) { 3450 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1)); 3451 } 3452 return regs; 3453 } 3454 3455 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 3456 int num_xmm_registers = XMMRegister::available_xmm_registers(); 3457 #if defined(_WINDOWS) 3458 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 3459 if (num_xmm_registers > 16) { 3460 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 3461 } 3462 return result; 3463 #else 3464 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 3465 #endif 3466 } 3467 3468 // C1 only ever uses the first double/float of the XMM register. 3469 static int xmm_save_size() { return sizeof(double); } 3470 3471 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3472 masm->movdbl(Address(rsp, offset), reg); 3473 } 3474 3475 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3476 masm->movdbl(reg, Address(rsp, offset)); 3477 } 3478 3479 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, 3480 bool save_fpu, int& gp_area_size, int& xmm_area_size) { 3481 3482 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 3483 StackAlignmentInBytes); 3484 xmm_area_size = save_fpu ? 
xmm_registers.size() * xmm_save_size() : 0; 3485 3486 return gp_area_size + xmm_area_size; 3487 } 3488 3489 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 3490 block_comment("push_call_clobbered_registers start"); 3491 // Regular registers 3492 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 3493 3494 int gp_area_size; 3495 int xmm_area_size; 3496 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 3497 gp_area_size, xmm_area_size); 3498 subptr(rsp, total_save_size); 3499 3500 push_set(gp_registers_to_push, 0); 3501 3502 if (save_fpu) { 3503 push_set(call_clobbered_xmm_registers(), gp_area_size); 3504 } 3505 3506 block_comment("push_call_clobbered_registers end"); 3507 } 3508 3509 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 3510 block_comment("pop_call_clobbered_registers start"); 3511 3512 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 3513 3514 int gp_area_size; 3515 int xmm_area_size; 3516 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 3517 gp_area_size, xmm_area_size); 3518 3519 if (restore_fpu) { 3520 pop_set(call_clobbered_xmm_registers(), gp_area_size); 3521 } 3522 3523 pop_set(gp_registers_to_pop, 0); 3524 3525 addptr(rsp, total_save_size); 3526 3527 vzeroupper(); 3528 3529 block_comment("pop_call_clobbered_registers end"); 3530 } 3531 3532 void MacroAssembler::push_set(XMMRegSet set, int offset) { 3533 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 3534 int spill_offset = offset; 3535 3536 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 3537 save_xmm_register(this, spill_offset, *it); 3538 spill_offset += xmm_save_size(); 3539 } 3540 } 3541 3542 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 3543 int restore_size = set.size() * xmm_save_size(); 3544 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 3545 3546 int restore_offset = offset + restore_size - xmm_save_size(); 3547 3548 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 3549 restore_xmm_register(this, restore_offset, *it); 3550 restore_offset -= xmm_save_size(); 3551 } 3552 } 3553 3554 void MacroAssembler::push_set(RegSet set, int offset) { 3555 int spill_offset; 3556 if (offset == -1) { 3557 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3558 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 3559 subptr(rsp, aligned_size); 3560 spill_offset = 0; 3561 } else { 3562 spill_offset = offset; 3563 } 3564 3565 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 3566 movptr(Address(rsp, spill_offset), *it); 3567 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3568 } 3569 } 3570 3571 void MacroAssembler::pop_set(RegSet set, int offset) { 3572 3573 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3574 int restore_size = set.size() * gp_reg_size; 3575 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 3576 3577 int restore_offset; 3578 if (offset == -1) { 3579 restore_offset = restore_size - gp_reg_size; 3580 } else { 3581 restore_offset = offset + restore_size - gp_reg_size; 3582 } 3583 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 3584 movptr(*it, 
                                Address(rsp, restore_offset));
    restore_offset -= gp_reg_size;
  }

  if (offset == -1) {
    addptr(rsp, aligned_size);
  }
}

// Preserves the contents of address, destroys the contents length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
  assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
  assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
  Label done;

  testptr(length_in_bytes, length_in_bytes);
  jcc(Assembler::zero, done);

  // convert length_in_bytes into a word count; the loop below clears the words
  // from the highest offset down
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  {
    Label L;
    testptr(length_in_bytes, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("length must be a multiple of BytesPerWord");
    bind(L);
  }
#endif
  Register index = length_in_bytes;
  xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by BytesPerWord (8) to get the word count
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }

  // initialize remaining object fields: store zero one word per iteration,
  // counting index down to zero
  {
    Label loop;
    bind(loop);
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  bind(done);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
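    // The lea below adds itable_index * wordSize + itentry_off to recv_klass;
    // the interface's itable offset (loaded into scan_temp on a hit) is applied
    // later, when the method entry is finally read.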
3669 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3670 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 3671 } 3672 3673 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 3674 // if (scan->interface() == intf) { 3675 // result = (klass + scan->offset() + itable_index); 3676 // } 3677 // } 3678 Label search, found_method; 3679 3680 for (int peel = 1; peel >= 0; peel--) { 3681 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 3682 cmpptr(intf_klass, method_result); 3683 3684 if (peel) { 3685 jccb(Assembler::equal, found_method); 3686 } else { 3687 jccb(Assembler::notEqual, search); 3688 // (invert the test to fall through to found_method...) 3689 } 3690 3691 if (!peel) break; 3692 3693 bind(search); 3694 3695 // Check that the previous entry is non-null. A null entry means that 3696 // the receiver class doesn't implement the interface, and wasn't the 3697 // same as when the caller was compiled. 3698 testptr(method_result, method_result); 3699 jcc(Assembler::zero, L_no_such_interface); 3700 addptr(scan_temp, scan_step); 3701 } 3702 3703 bind(found_method); 3704 3705 if (return_method) { 3706 // Got a hit. 3707 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 3708 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 3709 } 3710 } 3711 3712 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 3713 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 3714 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 3715 // The target method is determined by <holder_klass, itable_index>. 3716 // The receiver klass is in recv_klass. 3717 // On success, the result will be in method_result, and execution falls through. 3718 // On failure, execution transfers to the given label. 3719 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 3720 Register holder_klass, 3721 Register resolved_klass, 3722 Register method_result, 3723 Register scan_temp, 3724 Register temp_reg2, 3725 Register receiver, 3726 int itable_index, 3727 Label& L_no_such_interface) { 3728 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 3729 Register temp_itbl_klass = method_result; 3730 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 3731 3732 int vtable_base = in_bytes(Klass::vtable_start_offset()); 3733 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 3734 int scan_step = itableOffsetEntry::size() * wordSize; 3735 int vte_size = vtableEntry::size_in_bytes(); 3736 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 3737 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 3738 Address::ScaleFactor times_vte_scale = Address::times_ptr; 3739 assert(vte_size == wordSize, "adjust times_vte_scale"); 3740 3741 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 3742 3743 // temp_itbl_klass = recv_klass.itable[0] 3744 // scan_temp = &recv_klass.itable[0] + step 3745 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 3746 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 3747 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 3748 xorptr(temp_reg, temp_reg); 3749 3750 // Initial checks: 3751 // - if (holder_klass != resolved_klass), go to "scan for resolved" 3752 // - if (itable[0] == 0), no such interface 3753 // - if (itable[0] == holder_klass), shortcut to "holder found" 3754 cmpptr(holder_klass, resolved_klass); 3755 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 3756 testptr(temp_itbl_klass, temp_itbl_klass); 3757 jccb(Assembler::zero, L_no_such_interface); 3758 cmpptr(holder_klass, temp_itbl_klass); 3759 jccb(Assembler::equal, L_holder_found); 3760 3761 // Loop: Look for holder_klass record in itable 3762 // do { 3763 // tmp = itable[index]; 3764 // index += step; 3765 // if (tmp == holder_klass) { 3766 // goto L_holder_found; // Found! 3767 // } 3768 // } while (tmp != 0); 3769 // goto L_no_such_interface // Not found. 3770 Label L_scan_holder; 3771 bind(L_scan_holder); 3772 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3773 addptr(scan_temp, scan_step); 3774 cmpptr(holder_klass, temp_itbl_klass); 3775 jccb(Assembler::equal, L_holder_found); 3776 testptr(temp_itbl_klass, temp_itbl_klass); 3777 jccb(Assembler::notZero, L_scan_holder); 3778 3779 jmpb(L_no_such_interface); 3780 3781 // Loop: Look for resolved_class record in itable 3782 // do { 3783 // tmp = itable[index]; 3784 // index += step; 3785 // if (tmp == holder_klass) { 3786 // // Also check if we have met a holder klass 3787 // holder_tmp = itable[index-step-ioffset]; 3788 // } 3789 // if (tmp == resolved_klass) { 3790 // goto L_resolved_found; // Found! 3791 // } 3792 // } while (tmp != 0); 3793 // goto L_no_such_interface // Not found. 3794 // 3795 Label L_loop_scan_resolved; 3796 bind(L_loop_scan_resolved); 3797 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3798 addptr(scan_temp, scan_step); 3799 bind(L_loop_scan_resolved_entry); 3800 cmpptr(holder_klass, temp_itbl_klass); 3801 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3802 cmpptr(resolved_klass, temp_itbl_klass); 3803 jccb(Assembler::equal, L_resolved_found); 3804 testptr(temp_itbl_klass, temp_itbl_klass); 3805 jccb(Assembler::notZero, L_loop_scan_resolved); 3806 3807 jmpb(L_no_such_interface); 3808 3809 Label L_ready; 3810 3811 // See if we already have a holder klass. If not, go and scan for it. 
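  // (temp_reg was zeroed on entry and is only written by the cmov in the
  //  resolved-scan loop, so it is still zero iff no holder offset has been
  //  captured yet.)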
3812 bind(L_resolved_found); 3813 testptr(temp_reg, temp_reg); 3814 jccb(Assembler::zero, L_scan_holder); 3815 jmpb(L_ready); 3816 3817 bind(L_holder_found); 3818 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3819 3820 // Finally, temp_reg contains holder_klass vtable offset 3821 bind(L_ready); 3822 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3823 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 3824 load_klass(scan_temp, receiver, noreg); 3825 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3826 } else { 3827 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3828 } 3829 } 3830 3831 3832 // virtual method calling 3833 void MacroAssembler::lookup_virtual_method(Register recv_klass, 3834 RegisterOrConstant vtable_index, 3835 Register method_result) { 3836 const ByteSize base = Klass::vtable_start_offset(); 3837 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 3838 Address vtable_entry_addr(recv_klass, 3839 vtable_index, Address::times_ptr, 3840 base + vtableEntry::method_offset()); 3841 movptr(method_result, vtable_entry_addr); 3842 } 3843 3844 3845 void MacroAssembler::check_klass_subtype(Register sub_klass, 3846 Register super_klass, 3847 Register temp_reg, 3848 Label& L_success) { 3849 Label L_failure; 3850 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 3851 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 3852 bind(L_failure); 3853 } 3854 3855 3856 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 3857 Register super_klass, 3858 Register temp_reg, 3859 Label* L_success, 3860 Label* L_failure, 3861 Label* L_slow_path, 3862 RegisterOrConstant super_check_offset) { 3863 assert_different_registers(sub_klass, super_klass, temp_reg); 3864 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 3865 if (super_check_offset.is_register()) { 3866 assert_different_registers(sub_klass, super_klass, 3867 super_check_offset.as_register()); 3868 } else if (must_load_sco) { 3869 assert(temp_reg != noreg, "supply either a temp or a register offset"); 3870 } 3871 3872 Label L_fallthrough; 3873 int label_nulls = 0; 3874 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3875 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3876 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 3877 assert(label_nulls <= 1, "at most one null in the batch"); 3878 3879 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3880 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 3881 Address super_check_offset_addr(super_klass, sco_offset); 3882 3883 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 3884 // range of a jccb. If this routine grows larger, reconsider at 3885 // least some of these. 3886 #define local_jcc(assembler_cond, label) \ 3887 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 3888 else jcc( assembler_cond, label) /*omit semi*/ 3889 3890 // Hacked jmp, which may only be used just before L_fallthrough. 
3891 #define final_jmp(label) \ 3892 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3893 else jmp(label) /*omit semi*/ 3894 3895 // If the pointers are equal, we are done (e.g., String[] elements). 3896 // This self-check enables sharing of secondary supertype arrays among 3897 // non-primary types such as array-of-interface. Otherwise, each such 3898 // type would need its own customized SSA. 3899 // We move this check to the front of the fast path because many 3900 // type checks are in fact trivially successful in this manner, 3901 // so we get a nicely predicted branch right at the start of the check. 3902 cmpptr(sub_klass, super_klass); 3903 local_jcc(Assembler::equal, *L_success); 3904 3905 // Check the supertype display: 3906 if (must_load_sco) { 3907 // Positive movl does right thing on LP64. 3908 movl(temp_reg, super_check_offset_addr); 3909 super_check_offset = RegisterOrConstant(temp_reg); 3910 } 3911 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 3912 cmpptr(super_klass, super_check_addr); // load displayed supertype 3913 3914 // This check has worked decisively for primary supers. 3915 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3916 // (Secondary supers are interfaces and very deeply nested subtypes.) 3917 // This works in the same check above because of a tricky aliasing 3918 // between the super_cache and the primary super display elements. 3919 // (The 'super_check_addr' can address either, as the case requires.) 3920 // Note that the cache is updated below if it does not help us find 3921 // what we need immediately. 3922 // So if it was a primary super, we can just fail immediately. 3923 // Otherwise, it's the slow path for us (no success at this point). 3924 3925 if (super_check_offset.is_register()) { 3926 local_jcc(Assembler::equal, *L_success); 3927 cmpl(super_check_offset.as_register(), sc_offset); 3928 if (L_failure == &L_fallthrough) { 3929 local_jcc(Assembler::equal, *L_slow_path); 3930 } else { 3931 local_jcc(Assembler::notEqual, *L_failure); 3932 final_jmp(*L_slow_path); 3933 } 3934 } else if (super_check_offset.as_constant() == sc_offset) { 3935 // Need a slow path; fast failure is impossible. 3936 if (L_slow_path == &L_fallthrough) { 3937 local_jcc(Assembler::equal, *L_success); 3938 } else { 3939 local_jcc(Assembler::notEqual, *L_slow_path); 3940 final_jmp(*L_success); 3941 } 3942 } else { 3943 // No slow path; it's a fast decision. 
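    // Here super_check_offset is a constant other than sc_offset, so the compare
    // above hit the primary supers display: equal means definite success,
    // not-equal definite failure, and the secondary supers are never consulted.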
3944 if (L_failure == &L_fallthrough) { 3945 local_jcc(Assembler::equal, *L_success); 3946 } else { 3947 local_jcc(Assembler::notEqual, *L_failure); 3948 final_jmp(*L_success); 3949 } 3950 } 3951 3952 bind(L_fallthrough); 3953 3954 #undef local_jcc 3955 #undef final_jmp 3956 } 3957 3958 3959 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass, 3960 Register super_klass, 3961 Register temp_reg, 3962 Register temp2_reg, 3963 Label* L_success, 3964 Label* L_failure, 3965 bool set_cond_codes) { 3966 assert_different_registers(sub_klass, super_klass, temp_reg); 3967 if (temp2_reg != noreg) 3968 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 3969 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 3970 3971 Label L_fallthrough; 3972 int label_nulls = 0; 3973 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3974 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3975 assert(label_nulls <= 1, "at most one null in the batch"); 3976 3977 // a couple of useful fields in sub_klass: 3978 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 3979 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3980 Address secondary_supers_addr(sub_klass, ss_offset); 3981 Address super_cache_addr( sub_klass, sc_offset); 3982 3983 // Do a linear scan of the secondary super-klass chain. 3984 // This code is rarely used, so simplicity is a virtue here. 3985 // The repne_scan instruction uses fixed registers, which we must spill. 3986 // Don't worry too much about pre-existing connections with the input regs. 3987 3988 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 3989 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 3990 3991 // Get super_klass value into rax (even if it was in rdi or rcx). 3992 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 3993 if (super_klass != rax) { 3994 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 3995 mov(rax, super_klass); 3996 } 3997 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 3998 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 3999 4000 #ifndef PRODUCT 4001 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4002 ExternalAddress pst_counter_addr((address) pst_counter); 4003 lea(rcx, pst_counter_addr); 4004 incrementl(Address(rcx, 0)); 4005 #endif //PRODUCT 4006 4007 // We will consult the secondary-super array. 4008 movptr(rdi, secondary_supers_addr); 4009 // Load the array length. (Positive movl does right thing on LP64.) 4010 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4011 // Skip to start of data. 4012 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 4013 4014 // Scan RCX words at [RDI] for an occurrence of RAX. 4015 // Set NZ/Z based on last compare. 4016 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 4017 // not change flags (only scas instruction which is repeated sets flags). 4018 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 4019 4020 testptr(rax,rax); // Set Z = 0 4021 repne_scan(); 4022 4023 // Unspill the temp. registers: 4024 if (pushed_rdi) pop(rdi); 4025 if (pushed_rcx) pop(rcx); 4026 if (pushed_rax) pop(rax); 4027 4028 if (set_cond_codes) { 4029 // Special hack for the AD files: rdi is guaranteed non-zero. 4030 assert(!pushed_rdi, "rdi must be left non-null"); 4031 // Also, the condition codes are properly set Z/NZ on succeed/failure. 
4032 } 4033 4034 if (L_failure == &L_fallthrough) 4035 jccb(Assembler::notEqual, *L_failure); 4036 else jcc(Assembler::notEqual, *L_failure); 4037 4038 // Success. Cache the super we found and proceed in triumph. 4039 movptr(super_cache_addr, super_klass); 4040 4041 if (L_success != &L_fallthrough) { 4042 jmp(*L_success); 4043 } 4044 4045 #undef IS_A_TEMP 4046 4047 bind(L_fallthrough); 4048 } 4049 4050 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4051 Register super_klass, 4052 Register temp_reg, 4053 Register temp2_reg, 4054 Label* L_success, 4055 Label* L_failure, 4056 bool set_cond_codes) { 4057 assert(set_cond_codes == false, "must be false on 64-bit x86"); 4058 check_klass_subtype_slow_path 4059 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg, 4060 L_success, L_failure); 4061 } 4062 4063 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4064 Register super_klass, 4065 Register temp_reg, 4066 Register temp2_reg, 4067 Register temp3_reg, 4068 Register temp4_reg, 4069 Label* L_success, 4070 Label* L_failure) { 4071 if (UseSecondarySupersTable) { 4072 check_klass_subtype_slow_path_table 4073 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg, 4074 L_success, L_failure); 4075 } else { 4076 check_klass_subtype_slow_path_linear 4077 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false); 4078 } 4079 } 4080 4081 Register MacroAssembler::allocate_if_noreg(Register r, 4082 RegSetIterator<Register> &available_regs, 4083 RegSet ®s_to_push) { 4084 if (!r->is_valid()) { 4085 r = *available_regs++; 4086 regs_to_push += r; 4087 } 4088 return r; 4089 } 4090 4091 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 4092 Register super_klass, 4093 Register temp_reg, 4094 Register temp2_reg, 4095 Register temp3_reg, 4096 Register result_reg, 4097 Label* L_success, 4098 Label* L_failure) { 4099 // NB! Callers may assume that, when temp2_reg is a valid register, 4100 // this code sets it to a nonzero value. 
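  // (The movq(temp2_reg, 1) just before the final branches below provides that
  //  guarantee.)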
4101 bool temp2_reg_was_valid = temp2_reg->is_valid(); 4102 4103 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 4104 4105 Label L_fallthrough; 4106 int label_nulls = 0; 4107 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4108 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4109 assert(label_nulls <= 1, "at most one null in the batch"); 4110 4111 BLOCK_COMMENT("check_klass_subtype_slow_path_table"); 4112 4113 RegSetIterator<Register> available_regs 4114 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin(); 4115 4116 RegSet pushed_regs; 4117 4118 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 4119 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 4120 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 4121 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 4122 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs); 4123 4124 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg); 4125 4126 { 4127 4128 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4129 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4130 subptr(rsp, aligned_size); 4131 push_set(pushed_regs, 0); 4132 4133 lookup_secondary_supers_table_var(sub_klass, 4134 super_klass, 4135 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg); 4136 cmpq(result_reg, 0); 4137 4138 // Unspill the temp. registers: 4139 pop_set(pushed_regs, 0); 4140 // Increment SP but do not clobber flags. 4141 lea(rsp, Address(rsp, aligned_size)); 4142 } 4143 4144 if (temp2_reg_was_valid) { 4145 movq(temp2_reg, 1); 4146 } 4147 4148 jcc(Assembler::notEqual, *L_failure); 4149 4150 if (L_success != &L_fallthrough) { 4151 jmp(*L_success); 4152 } 4153 4154 bind(L_fallthrough); 4155 } 4156 4157 // population_count variant for running without the POPCNT 4158 // instruction, which was introduced with SSE4.2 in 2008. 4159 void MacroAssembler::population_count(Register dst, Register src, 4160 Register scratch1, Register scratch2) { 4161 assert_different_registers(src, scratch1, scratch2); 4162 if (UsePopCountInstruction) { 4163 Assembler::popcntq(dst, src); 4164 } else { 4165 assert_different_registers(src, scratch1, scratch2); 4166 assert_different_registers(dst, scratch1, scratch2); 4167 Label loop, done; 4168 4169 mov(scratch1, src); 4170 // dst = 0; 4171 // while(scratch1 != 0) { 4172 // dst++; 4173 // scratch1 &= (scratch1 - 1); 4174 // } 4175 xorl(dst, dst); 4176 testq(scratch1, scratch1); 4177 jccb(Assembler::equal, done); 4178 { 4179 bind(loop); 4180 incq(dst); 4181 movq(scratch2, scratch1); 4182 decq(scratch2); 4183 andq(scratch1, scratch2); 4184 jccb(Assembler::notEqual, loop); 4185 } 4186 bind(done); 4187 } 4188 #ifdef ASSERT 4189 mov64(scratch1, 0xCafeBabeDeadBeef); 4190 movq(scratch2, scratch1); 4191 #endif 4192 } 4193 4194 // Ensure that the inline code and the stub are using the same registers. 
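// The fixed assignment asserted below (rax/rbx/rcx/rdx/rsi/r11/rdi) is what
// lookup_secondary_supers_table_const relies on when it calls the slow-path
// stub via StubRoutines::lookup_secondary_supers_table_slow_path_stub().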
#define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS                      \
do {                                                                 \
  assert(r_super_klass  == rax, "mismatch");                         \
  assert(r_array_base   == rbx, "mismatch");                         \
  assert(r_array_length == rcx, "mismatch");                         \
  assert(r_array_index  == rdx, "mismatch");                         \
  assert(r_sub_klass    == rsi || r_sub_klass == noreg, "mismatch"); \
  assert(r_bitmap       == r11 || r_bitmap == noreg, "mismatch");    \
  assert(result         == rdi || result == noreg, "mismatch");      \
} while(0)

// Versions of salq and rorq that don't need count to be in rcx

void MacroAssembler::salq(Register dest, Register count) {
  if (count == rcx) {
    Assembler::salq(dest);
  } else {
    assert_different_registers(rcx, dest);
    xchgq(rcx, count);
    Assembler::salq(dest);
    xchgq(rcx, count);
  }
}

void MacroAssembler::rorq(Register dest, Register count) {
  if (count == rcx) {
    Assembler::rorq(dest);
  } else {
    assert_different_registers(rcx, dest);
    xchgq(rcx, count);
    Assembler::rorq(dest);
    xchgq(rcx, count);
  }
}

// At runtime, return 0 in result if r_super_klass is a superclass of
// r_sub_klass, otherwise return nonzero. Use this if you know the
// super_klass_slot of the class you're looking for. This is always
// the case for instanceof and checkcast.
void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass,
                                                         Register r_super_klass,
                                                         Register temp1,
                                                         Register temp2,
                                                         Register temp3,
                                                         Register temp4,
                                                         Register result,
                                                         u1 super_klass_slot) {
  assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result);

  Label L_fallthrough, L_success, L_failure;

  BLOCK_COMMENT("lookup_secondary_supers_table {");

  const Register
    r_array_index  = temp1,
    r_array_length = temp2,
    r_array_base   = temp3,
    r_bitmap       = temp4;

  LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS;

  xorq(result, result); // = 0

  movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset()));
  movq(r_array_index, r_bitmap);

  // First check the bitmap to see if super_klass might be present. If
  // the bit is zero, we are certain that super_klass is not one of
  // the secondary supers.
  u1 bit = super_klass_slot;
  {
    // NB: If the count in an x86 shift instruction is 0, the flags are
    // not affected, so we do a testq instead.
    int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit;
    if (shift_count != 0) {
      salq(r_array_index, shift_count);
    } else {
      testq(r_array_index, r_array_index);
    }
  }
  // We test the MSB of r_array_index, i.e. its sign bit
  jcc(Assembler::positive, L_failure);

  // Get the first array index that can contain super_klass into r_array_index.
  if (bit != 0) {
    population_count(r_array_index, r_array_index, temp2, temp3);
  } else {
    movl(r_array_index, 1);
  }
  // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.

  // We will consult the secondary-super array.
  movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));

  // We're asserting that the first word in an Array<Klass*> is the
  // length, and the second word is the first word of the data.
If 4293 // that ever changes, r_array_base will have to be adjusted here. 4294 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4295 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4296 4297 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4298 jccb(Assembler::equal, L_success); 4299 4300 // Is there another entry to check? Consult the bitmap. 4301 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 4302 jccb(Assembler::carryClear, L_failure); 4303 4304 // Linear probe. Rotate the bitmap so that the next bit to test is 4305 // in Bit 1. 4306 if (bit != 0) { 4307 rorq(r_bitmap, bit); 4308 } 4309 4310 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4311 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4312 // Kills: r_array_length. 4313 // Returns: result. 4314 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 4315 // Result (0/1) is in rdi 4316 jmpb(L_fallthrough); 4317 4318 bind(L_failure); 4319 incq(result); // 0 => 1 4320 4321 bind(L_success); 4322 // result = 0; 4323 4324 bind(L_fallthrough); 4325 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4326 4327 if (VerifySecondarySupers) { 4328 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4329 temp1, temp2, temp3); 4330 } 4331 } 4332 4333 // At runtime, return 0 in result if r_super_klass is a superclass of 4334 // r_sub_klass, otherwise return nonzero. Use this version of 4335 // lookup_secondary_supers_table() if you don't know ahead of time 4336 // which superclass will be searched for. Used by interpreter and 4337 // runtime stubs. It is larger and has somewhat greater latency than 4338 // the version above, which takes a constant super_klass_slot. 4339 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 4340 Register r_super_klass, 4341 Register temp1, 4342 Register temp2, 4343 Register temp3, 4344 Register temp4, 4345 Register result) { 4346 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 4347 assert_different_registers(r_sub_klass, r_super_klass, rcx); 4348 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4); 4349 4350 Label L_fallthrough, L_success, L_failure; 4351 4352 BLOCK_COMMENT("lookup_secondary_supers_table {"); 4353 4354 RegSetIterator<Register> available_regs = (temps - rcx).begin(); 4355 4356 // FIXME. Once we are sure that all paths reaching this point really 4357 // do pass rcx as one of our temps we can get rid of the following 4358 // workaround. 4359 assert(temps.contains(rcx), "fix this code"); 4360 4361 // We prefer to have our shift count in rcx. If rcx is one of our 4362 // temps, use it for slot. If not, pick any of our temps. 4363 Register slot; 4364 if (!temps.contains(rcx)) { 4365 slot = *available_regs++; 4366 } else { 4367 slot = rcx; 4368 } 4369 4370 const Register r_array_index = *available_regs++; 4371 const Register r_bitmap = *available_regs++; 4372 4373 // The logic above guarantees this property, but we state it here. 4374 assert_different_registers(r_array_index, r_bitmap, rcx); 4375 4376 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 4377 movq(r_array_index, r_bitmap); 4378 4379 // First check the bitmap to see if super_klass might be present. If 4380 // the bit is zero, we are certain that super_klass is not one of 4381 // the secondary supers. 
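  // Load super_klass's hash slot and shift the corresponding bitmap bit into
  // the sign bit of r_array_index (a copy of the bitmap): slot ^ 63 == 63 - slot,
  // so the salq below shifts left by (63 - slot).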
4382 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4383 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64) 4384 salq(r_array_index, slot); 4385 4386 testq(r_array_index, r_array_index); 4387 // We test the MSB of r_array_index, i.e. its sign bit 4388 jcc(Assembler::positive, L_failure); 4389 4390 const Register r_array_base = *available_regs++; 4391 4392 // Get the first array index that can contain super_klass into r_array_index. 4393 // Note: Clobbers r_array_base and slot. 4394 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot); 4395 4396 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 4397 4398 // We will consult the secondary-super array. 4399 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4400 4401 // We're asserting that the first word in an Array<Klass*> is the 4402 // length, and the second word is the first word of the data. If 4403 // that ever changes, r_array_base will have to be adjusted here. 4404 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4405 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4406 4407 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4408 jccb(Assembler::equal, L_success); 4409 4410 // Restore slot to its true value 4411 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4412 4413 // Linear probe. Rotate the bitmap so that the next bit to test is 4414 // in Bit 1. 4415 rorq(r_bitmap, slot); 4416 4417 // Is there another entry to check? Consult the bitmap. 4418 btq(r_bitmap, 1); 4419 jccb(Assembler::carryClear, L_failure); 4420 4421 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4422 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4423 // Kills: r_array_length. 4424 // Returns: result. 4425 lookup_secondary_supers_table_slow_path(r_super_klass, 4426 r_array_base, 4427 r_array_index, 4428 r_bitmap, 4429 /*temp1*/result, 4430 /*temp2*/slot, 4431 &L_success, 4432 nullptr); 4433 4434 bind(L_failure); 4435 movq(result, 1); 4436 jmpb(L_fallthrough); 4437 4438 bind(L_success); 4439 xorq(result, result); // = 0 4440 4441 bind(L_fallthrough); 4442 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4443 4444 if (VerifySecondarySupers) { 4445 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4446 temp1, temp2, temp3); 4447 } 4448 } 4449 4450 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit, 4451 Label* L_success, Label* L_failure) { 4452 Label L_loop, L_fallthrough; 4453 { 4454 int label_nulls = 0; 4455 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4456 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4457 assert(label_nulls <= 1, "at most one null in the batch"); 4458 } 4459 bind(L_loop); 4460 cmpq(value, Address(addr, count, Address::times_8)); 4461 jcc(Assembler::equal, *L_success); 4462 addl(count, 1); 4463 cmpl(count, limit); 4464 jcc(Assembler::less, L_loop); 4465 4466 if (&L_fallthrough != L_failure) { 4467 jmp(*L_failure); 4468 } 4469 bind(L_fallthrough); 4470 } 4471 4472 // Called by code generated by check_klass_subtype_slow_path 4473 // above. This is called when there is a collision in the hashed 4474 // lookup in the secondary supers array. 
4475 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 4476 Register r_array_base, 4477 Register r_array_index, 4478 Register r_bitmap, 4479 Register temp1, 4480 Register temp2, 4481 Label* L_success, 4482 Label* L_failure) { 4483 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 4484 4485 const Register 4486 r_array_length = temp1, 4487 r_sub_klass = noreg, 4488 result = noreg; 4489 4490 Label L_fallthrough; 4491 int label_nulls = 0; 4492 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4493 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4494 assert(label_nulls <= 1, "at most one null in the batch"); 4495 4496 // Load the array length. 4497 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4498 // And adjust the array base to point to the data. 4499 // NB! Effectively increments current slot index by 1. 4500 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 4501 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4502 4503 // Linear probe 4504 Label L_huge; 4505 4506 // The bitmap is full to bursting. 4507 // Implicit invariant: BITMAP_FULL implies (length > 0) 4508 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 4509 jcc(Assembler::greater, L_huge); 4510 4511 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 4512 // current slot (at secondary_supers[r_array_index]) has not yet 4513 // been inspected, and r_array_index may be out of bounds if we 4514 // wrapped around the end of the array. 4515 4516 { // This is conventional linear probing, but instead of terminating 4517 // when a null entry is found in the table, we maintain a bitmap 4518 // in which a 0 indicates missing entries. 4519 // The check above guarantees there are 0s in the bitmap, so the loop 4520 // eventually terminates. 4521 4522 xorl(temp2, temp2); // = 0; 4523 4524 Label L_again; 4525 bind(L_again); 4526 4527 // Check for array wraparound. 4528 cmpl(r_array_index, r_array_length); 4529 cmovl(Assembler::greaterEqual, r_array_index, temp2); 4530 4531 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4532 jcc(Assembler::equal, *L_success); 4533 4534 // If the next bit in bitmap is zero, we're done. 4535 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 4536 jcc(Assembler::carryClear, *L_failure); 4537 4538 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 4539 addl(r_array_index, 1); 4540 4541 jmp(L_again); 4542 } 4543 4544 { // Degenerate case: more than 64 secondary supers. 4545 // FIXME: We could do something smarter here, maybe a vectorized 4546 // comparison or a binary search, but is that worth any added 4547 // complexity? 4548 bind(L_huge); 4549 xorl(r_array_index, r_array_index); // = 0 4550 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 4551 L_success, 4552 (&L_fallthrough != L_failure ? L_failure : nullptr)); 4553 4554 bind(L_fallthrough); 4555 } 4556 } 4557 4558 struct VerifyHelperArguments { 4559 Klass* _super; 4560 Klass* _sub; 4561 intptr_t _linear_result; 4562 intptr_t _table_result; 4563 }; 4564 4565 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 4566 Klass::on_secondary_supers_verification_failure(args->_super, 4567 args->_sub, 4568 args->_linear_result, 4569 args->_table_result, 4570 msg); 4571 } 4572 4573 // Make sure that the hashed lookup and a linear scan agree. 
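// 'result' carries the table lookup's answer (0 means r_super_klass was found);
// the code below recomputes the answer with a linear repne_scanq over the
// secondary supers array and calls the helper above if the two disagree.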
4574 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 4575 Register r_super_klass, 4576 Register result, 4577 Register temp1, 4578 Register temp2, 4579 Register temp3) { 4580 const Register 4581 r_array_index = temp1, 4582 r_array_length = temp2, 4583 r_array_base = temp3, 4584 r_bitmap = noreg; 4585 4586 BLOCK_COMMENT("verify_secondary_supers_table {"); 4587 4588 Label L_success, L_failure, L_check, L_done; 4589 4590 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4591 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4592 // And adjust the array base to point to the data. 4593 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4594 4595 testl(r_array_length, r_array_length); // array_length == 0? 4596 jcc(Assembler::zero, L_failure); 4597 4598 movl(r_array_index, 0); 4599 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 4600 // fall through to L_failure 4601 4602 const Register linear_result = r_array_index; // reuse temp1 4603 4604 bind(L_failure); // not present 4605 movl(linear_result, 1); 4606 jmp(L_check); 4607 4608 bind(L_success); // present 4609 movl(linear_result, 0); 4610 4611 bind(L_check); 4612 cmpl(linear_result, result); 4613 jcc(Assembler::equal, L_done); 4614 4615 { // To avoid calling convention issues, build a record on the stack 4616 // and pass the pointer to that instead. 4617 push(result); 4618 push(linear_result); 4619 push(r_sub_klass); 4620 push(r_super_klass); 4621 movptr(c_rarg1, rsp); 4622 movptr(c_rarg0, (uintptr_t) "mismatch"); 4623 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 4624 should_not_reach_here(); 4625 } 4626 bind(L_done); 4627 4628 BLOCK_COMMENT("} verify_secondary_supers_table"); 4629 } 4630 4631 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 4632 4633 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) { 4634 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 4635 4636 Label L_fallthrough; 4637 if (L_fast_path == nullptr) { 4638 L_fast_path = &L_fallthrough; 4639 } else if (L_slow_path == nullptr) { 4640 L_slow_path = &L_fallthrough; 4641 } 4642 4643 // Fast path check: class is fully initialized. 4644 // init_state needs acquire, but x86 is TSO, and so we are already good. 
4645 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 4646 jcc(Assembler::equal, *L_fast_path); 4647 4648 // Fast path check: current thread is initializer thread 4649 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset())); 4650 if (L_slow_path == &L_fallthrough) { 4651 jcc(Assembler::equal, *L_fast_path); 4652 bind(*L_slow_path); 4653 } else if (L_fast_path == &L_fallthrough) { 4654 jcc(Assembler::notEqual, *L_slow_path); 4655 bind(*L_fast_path); 4656 } else { 4657 Unimplemented(); 4658 } 4659 } 4660 4661 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 4662 if (VM_Version::supports_cmov()) { 4663 cmovl(cc, dst, src); 4664 } else { 4665 Label L; 4666 jccb(negate_condition(cc), L); 4667 movl(dst, src); 4668 bind(L); 4669 } 4670 } 4671 4672 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 4673 if (VM_Version::supports_cmov()) { 4674 cmovl(cc, dst, src); 4675 } else { 4676 Label L; 4677 jccb(negate_condition(cc), L); 4678 movl(dst, src); 4679 bind(L); 4680 } 4681 } 4682 4683 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 4684 if (!VerifyOops) return; 4685 4686 BLOCK_COMMENT("verify_oop {"); 4687 push(rscratch1); 4688 push(rax); // save rax 4689 push(reg); // pass register argument 4690 4691 // Pass register number to verify_oop_subroutine 4692 const char* b = nullptr; 4693 { 4694 ResourceMark rm; 4695 stringStream ss; 4696 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 4697 b = code_string(ss.as_string()); 4698 } 4699 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4700 pushptr(buffer.addr(), rscratch1); 4701 4702 // call indirectly to solve generation ordering problem 4703 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4704 call(rax); 4705 // Caller pops the arguments (oop, message) and restores rax, r10 4706 BLOCK_COMMENT("} verify_oop"); 4707 } 4708 4709 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 4710 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 4711 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 4712 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 4713 vpternlogd(dst, 0xFF, dst, dst, vector_len); 4714 } else if (VM_Version::supports_avx()) { 4715 vpcmpeqd(dst, dst, dst, vector_len); 4716 } else { 4717 pcmpeqd(dst, dst); 4718 } 4719 } 4720 4721 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 4722 int extra_slot_offset) { 4723 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
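  // Interpreter arguments live on the expression stack just above the return PC
  // pushed by the call; hence the extra wordSize added to the offset below, and
  // the optional register scale when the slot index is not a constant.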
4724 int stackElementSize = Interpreter::stackElementSize; 4725 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 4726 #ifdef ASSERT 4727 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 4728 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 4729 #endif 4730 Register scale_reg = noreg; 4731 Address::ScaleFactor scale_factor = Address::no_scale; 4732 if (arg_slot.is_constant()) { 4733 offset += arg_slot.as_constant() * stackElementSize; 4734 } else { 4735 scale_reg = arg_slot.as_register(); 4736 scale_factor = Address::times(stackElementSize); 4737 } 4738 offset += wordSize; // return PC is on stack 4739 return Address(rsp, scale_reg, scale_factor, offset); 4740 } 4741 4742 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 4743 if (!VerifyOops) return; 4744 4745 push(rscratch1); 4746 push(rax); // save rax, 4747 // addr may contain rsp so we will have to adjust it based on the push 4748 // we just did (and on 64 bit we do two pushes) 4749 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 4750 // stores rax into addr which is backwards of what was intended. 4751 if (addr.uses(rsp)) { 4752 lea(rax, addr); 4753 pushptr(Address(rax, 2 * BytesPerWord)); 4754 } else { 4755 pushptr(addr); 4756 } 4757 4758 // Pass register number to verify_oop_subroutine 4759 const char* b = nullptr; 4760 { 4761 ResourceMark rm; 4762 stringStream ss; 4763 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 4764 b = code_string(ss.as_string()); 4765 } 4766 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4767 pushptr(buffer.addr(), rscratch1); 4768 4769 // call indirectly to solve generation ordering problem 4770 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4771 call(rax); 4772 // Caller pops the arguments (addr, message) and restores rax, r10. 
4773 } 4774 4775 void MacroAssembler::verify_tlab() { 4776 #ifdef ASSERT 4777 if (UseTLAB && VerifyOops) { 4778 Label next, ok; 4779 Register t1 = rsi; 4780 4781 push(t1); 4782 4783 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4784 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset()))); 4785 jcc(Assembler::aboveEqual, next); 4786 STOP("assert(top >= start)"); 4787 should_not_reach_here(); 4788 4789 bind(next); 4790 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); 4791 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4792 jcc(Assembler::aboveEqual, ok); 4793 STOP("assert(top <= end)"); 4794 should_not_reach_here(); 4795 4796 bind(ok); 4797 pop(t1); 4798 } 4799 #endif 4800 } 4801 4802 class ControlWord { 4803 public: 4804 int32_t _value; 4805 4806 int rounding_control() const { return (_value >> 10) & 3 ; } 4807 int precision_control() const { return (_value >> 8) & 3 ; } 4808 bool precision() const { return ((_value >> 5) & 1) != 0; } 4809 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4810 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4811 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4812 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4813 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4814 4815 void print() const { 4816 // rounding control 4817 const char* rc; 4818 switch (rounding_control()) { 4819 case 0: rc = "round near"; break; 4820 case 1: rc = "round down"; break; 4821 case 2: rc = "round up "; break; 4822 case 3: rc = "chop "; break; 4823 default: 4824 rc = nullptr; // silence compiler warnings 4825 fatal("Unknown rounding control: %d", rounding_control()); 4826 }; 4827 // precision control 4828 const char* pc; 4829 switch (precision_control()) { 4830 case 0: pc = "24 bits "; break; 4831 case 1: pc = "reserved"; break; 4832 case 2: pc = "53 bits "; break; 4833 case 3: pc = "64 bits "; break; 4834 default: 4835 pc = nullptr; // silence compiler warnings 4836 fatal("Unknown precision control: %d", precision_control()); 4837 }; 4838 // flags 4839 char f[9]; 4840 f[0] = ' '; 4841 f[1] = ' '; 4842 f[2] = (precision ()) ? 'P' : 'p'; 4843 f[3] = (underflow ()) ? 'U' : 'u'; 4844 f[4] = (overflow ()) ? 'O' : 'o'; 4845 f[5] = (zero_divide ()) ? 'Z' : 'z'; 4846 f[6] = (denormalized()) ? 'D' : 'd'; 4847 f[7] = (invalid ()) ? 
'I' : 'i'; 4848 f[8] = '\x0'; 4849 // output 4850 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 4851 } 4852 4853 }; 4854 4855 class StatusWord { 4856 public: 4857 int32_t _value; 4858 4859 bool busy() const { return ((_value >> 15) & 1) != 0; } 4860 bool C3() const { return ((_value >> 14) & 1) != 0; } 4861 bool C2() const { return ((_value >> 10) & 1) != 0; } 4862 bool C1() const { return ((_value >> 9) & 1) != 0; } 4863 bool C0() const { return ((_value >> 8) & 1) != 0; } 4864 int top() const { return (_value >> 11) & 7 ; } 4865 bool error_status() const { return ((_value >> 7) & 1) != 0; } 4866 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 4867 bool precision() const { return ((_value >> 5) & 1) != 0; } 4868 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4869 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4870 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4871 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4872 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4873 4874 void print() const { 4875 // condition codes 4876 char c[5]; 4877 c[0] = (C3()) ? '3' : '-'; 4878 c[1] = (C2()) ? '2' : '-'; 4879 c[2] = (C1()) ? '1' : '-'; 4880 c[3] = (C0()) ? '0' : '-'; 4881 c[4] = '\x0'; 4882 // flags 4883 char f[9]; 4884 f[0] = (error_status()) ? 'E' : '-'; 4885 f[1] = (stack_fault ()) ? 'S' : '-'; 4886 f[2] = (precision ()) ? 'P' : '-'; 4887 f[3] = (underflow ()) ? 'U' : '-'; 4888 f[4] = (overflow ()) ? 'O' : '-'; 4889 f[5] = (zero_divide ()) ? 'Z' : '-'; 4890 f[6] = (denormalized()) ? 'D' : '-'; 4891 f[7] = (invalid ()) ? 'I' : '-'; 4892 f[8] = '\x0'; 4893 // output 4894 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 4895 } 4896 4897 }; 4898 4899 class TagWord { 4900 public: 4901 int32_t _value; 4902 4903 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 4904 4905 void print() const { 4906 printf("%04x", _value & 0xFFFF); 4907 } 4908 4909 }; 4910 4911 class FPU_Register { 4912 public: 4913 int32_t _m0; 4914 int32_t _m1; 4915 int16_t _ex; 4916 4917 bool is_indefinite() const { 4918 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 4919 } 4920 4921 void print() const { 4922 char sign = (_ex < 0) ? '-' : '+'; 4923 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 4924 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 4925 }; 4926 4927 }; 4928 4929 class FPU_State { 4930 public: 4931 enum { 4932 register_size = 10, 4933 number_of_registers = 8, 4934 register_mask = 7 4935 }; 4936 4937 ControlWord _control_word; 4938 StatusWord _status_word; 4939 TagWord _tag_word; 4940 int32_t _error_offset; 4941 int32_t _error_selector; 4942 int32_t _data_offset; 4943 int32_t _data_selector; 4944 int8_t _register[register_size * number_of_registers]; 4945 4946 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 4947 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 4948 4949 const char* tag_as_string(int tag) const { 4950 switch (tag) { 4951 case 0: return "valid"; 4952 case 1: return "zero"; 4953 case 2: return "special"; 4954 case 3: return "empty"; 4955 } 4956 ShouldNotReachHere(); 4957 return nullptr; 4958 } 4959 4960 void print() const { 4961 // print computation registers 4962 { int t = _status_word.top(); 4963 for (int i = 0; i < number_of_registers; i++) { 4964 int j = (i - t) & register_mask; 4965 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 4966 st(j)->print(); 4967 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 4968 } 4969 } 4970 printf("\n"); 4971 // print control registers 4972 printf("ctrl = "); _control_word.print(); printf("\n"); 4973 printf("stat = "); _status_word .print(); printf("\n"); 4974 printf("tags = "); _tag_word .print(); printf("\n"); 4975 } 4976 4977 }; 4978 4979 class Flag_Register { 4980 public: 4981 int32_t _value; 4982 4983 bool overflow() const { return ((_value >> 11) & 1) != 0; } 4984 bool direction() const { return ((_value >> 10) & 1) != 0; } 4985 bool sign() const { return ((_value >> 7) & 1) != 0; } 4986 bool zero() const { return ((_value >> 6) & 1) != 0; } 4987 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 4988 bool parity() const { return ((_value >> 2) & 1) != 0; } 4989 bool carry() const { return ((_value >> 0) & 1) != 0; } 4990 4991 void print() const { 4992 // flags 4993 char f[8]; 4994 f[0] = (overflow ()) ? 'O' : '-'; 4995 f[1] = (direction ()) ? 'D' : '-'; 4996 f[2] = (sign ()) ? 'S' : '-'; 4997 f[3] = (zero ()) ? 'Z' : '-'; 4998 f[4] = (auxiliary_carry()) ? 'A' : '-'; 4999 f[5] = (parity ()) ? 'P' : '-'; 5000 f[6] = (carry ()) ? 'C' : '-'; 5001 f[7] = '\x0'; 5002 // output 5003 printf("%08x flags = %s", _value, f); 5004 } 5005 5006 }; 5007 5008 class IU_Register { 5009 public: 5010 int32_t _value; 5011 5012 void print() const { 5013 printf("%08x %11d", _value, _value); 5014 } 5015 5016 }; 5017 5018 class IU_State { 5019 public: 5020 Flag_Register _eflags; 5021 IU_Register _rdi; 5022 IU_Register _rsi; 5023 IU_Register _rbp; 5024 IU_Register _rsp; 5025 IU_Register _rbx; 5026 IU_Register _rdx; 5027 IU_Register _rcx; 5028 IU_Register _rax; 5029 5030 void print() const { 5031 // computation registers 5032 printf("rax, = "); _rax.print(); printf("\n"); 5033 printf("rbx, = "); _rbx.print(); printf("\n"); 5034 printf("rcx = "); _rcx.print(); printf("\n"); 5035 printf("rdx = "); _rdx.print(); printf("\n"); 5036 printf("rdi = "); _rdi.print(); printf("\n"); 5037 printf("rsi = "); _rsi.print(); printf("\n"); 5038 printf("rbp, = "); _rbp.print(); printf("\n"); 5039 printf("rsp = "); _rsp.print(); printf("\n"); 5040 printf("\n"); 5041 // control registers 5042 printf("flgs = "); _eflags.print(); printf("\n"); 5043 } 5044 }; 5045 5046 5047 class CPU_State { 5048 public: 5049 FPU_State _fpu_state; 5050 IU_State _iu_state; 5051 5052 void print() const { 5053 printf("--------------------------------------------------\n"); 5054 _iu_state .print(); 5055 printf("\n"); 5056 _fpu_state.print(); 5057 printf("--------------------------------------------------\n"); 5058 } 5059 5060 }; 5061 5062 5063 static void _print_CPU_state(CPU_State* state) { 5064 state->print(); 5065 }; 5066 5067 5068 void MacroAssembler::print_CPU_state() { 5069 push_CPU_state(); 5070 push(rsp); // pass CPU state 5071 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5072 addptr(rsp, wordSize); // discard argument 5073 pop_CPU_state(); 5074 } 5075 5076 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5077 // Either restore the MXCSR register after returning from the JNI Call 5078 // or verify that it wasn't changed (with -Xcheck:jni flag). 
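// (rscratch is only needed when the address of the standard MXCSR value is not rip-reachable from
// the emitted code; per the usual AddressLiteral pattern in this file, ldmxcsr() then materializes
// the address through rscratch before loading.)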
5079 if (VM_Version::supports_sse()) { 5080 if (RestoreMXCSROnJNICalls) { 5081 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5082 } else if (CheckJNICalls) { 5083 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5084 } 5085 } 5086 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5087 vzeroupper(); 5088 } 5089 5090 // ((OopHandle)result).resolve(); 5091 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5092 assert_different_registers(result, tmp); 5093 5094 // Only 64 bit platforms support GCs that require a tmp register 5095 // Only IN_HEAP loads require a thread_tmp register 5096 // OopHandle::resolve is an indirection like jobject. 5097 access_load_at(T_OBJECT, IN_NATIVE, 5098 result, Address(result, 0), tmp); 5099 } 5100 5101 // ((WeakHandle)result).resolve(); 5102 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5103 assert_different_registers(rresult, rtmp); 5104 Label resolved; 5105 5106 // A null weak handle resolves to null. 5107 cmpptr(rresult, 0); 5108 jcc(Assembler::equal, resolved); 5109 5110 // Only 64 bit platforms support GCs that require a tmp register 5111 // Only IN_HEAP loads require a thread_tmp register 5112 // WeakHandle::resolve is an indirection like jweak. 5113 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5114 rresult, Address(rresult, 0), rtmp); 5115 bind(resolved); 5116 } 5117 5118 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5119 // get mirror 5120 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5121 load_method_holder(mirror, method); 5122 movptr(mirror, Address(mirror, mirror_offset)); 5123 resolve_oop_handle(mirror, tmp); 5124 } 5125 5126 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5127 load_method_holder(rresult, rmethod); 5128 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5129 } 5130 5131 void MacroAssembler::load_method_holder(Register holder, Register method) { 5132 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5133 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5134 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5135 } 5136 5137 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { 5138 assert(UseCompactObjectHeaders, "expect compact object headers"); 5139 movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5140 shrq(dst, markWord::klass_shift); 5141 } 5142 5143 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5144 assert_different_registers(src, tmp); 5145 assert_different_registers(dst, tmp); 5146 5147 if (UseCompactObjectHeaders) { 5148 load_narrow_klass_compact(dst, src); 5149 decode_klass_not_null(dst, tmp); 5150 } else if (UseCompressedClassPointers) { 5151 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5152 decode_klass_not_null(dst, tmp); 5153 } else { 5154 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5155 } 5156 } 5157 5158 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5159 assert(!UseCompactObjectHeaders, "not with compact headers"); 5160 assert_different_registers(src, tmp); 5161 assert_different_registers(dst, tmp); 5162 if (UseCompressedClassPointers) { 5163 encode_klass_not_null(src, tmp); 5164 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5165 } else { 5166 
movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5167 } 5168 } 5169 5170 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { 5171 if (UseCompactObjectHeaders) { 5172 assert(tmp != noreg, "need tmp"); 5173 assert_different_registers(klass, obj, tmp); 5174 load_narrow_klass_compact(tmp, obj); 5175 cmpl(klass, tmp); 5176 } else if (UseCompressedClassPointers) { 5177 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5178 } else { 5179 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5180 } 5181 } 5182 5183 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { 5184 if (UseCompactObjectHeaders) { 5185 assert(tmp2 != noreg, "need tmp2"); 5186 assert_different_registers(obj1, obj2, tmp1, tmp2); 5187 load_narrow_klass_compact(tmp1, obj1); 5188 load_narrow_klass_compact(tmp2, obj2); 5189 cmpl(tmp1, tmp2); 5190 } else if (UseCompressedClassPointers) { 5191 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5192 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5193 } else { 5194 movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5195 cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5196 } 5197 } 5198 5199 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5200 Register tmp1) { 5201 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5202 decorators = AccessInternal::decorator_fixup(decorators, type); 5203 bool as_raw = (decorators & AS_RAW) != 0; 5204 if (as_raw) { 5205 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1); 5206 } else { 5207 bs->load_at(this, decorators, type, dst, src, tmp1); 5208 } 5209 } 5210 5211 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5212 Register tmp1, Register tmp2, Register tmp3) { 5213 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5214 decorators = AccessInternal::decorator_fixup(decorators, type); 5215 bool as_raw = (decorators & AS_RAW) != 0; 5216 if (as_raw) { 5217 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5218 } else { 5219 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5220 } 5221 } 5222 5223 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5224 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1); 5225 } 5226 5227 // Doesn't do verification, generates fixed size code 5228 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5229 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1); 5230 } 5231 5232 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5233 Register tmp2, Register tmp3, DecoratorSet decorators) { 5234 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5235 } 5236 5237 // Used for storing nulls. 
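// A null store needs no value register and no temps, so noreg is passed for all of them.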
5238 void MacroAssembler::store_heap_oop_null(Address dst) { 5239 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5240 } 5241 5242 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5243 assert(!UseCompactObjectHeaders, "Don't use with compact headers"); 5244 if (UseCompressedClassPointers) { 5245 // Store to klass gap in destination 5246 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5247 } 5248 } 5249 5250 #ifdef ASSERT 5251 void MacroAssembler::verify_heapbase(const char* msg) { 5252 assert (UseCompressedOops, "should be compressed"); 5253 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5254 if (CheckCompressedOops) { 5255 Label ok; 5256 ExternalAddress src2(CompressedOops::base_addr()); 5257 const bool is_src2_reachable = reachable(src2); 5258 if (!is_src2_reachable) { 5259 push(rscratch1); // cmpptr trashes rscratch1 5260 } 5261 cmpptr(r12_heapbase, src2, rscratch1); 5262 jcc(Assembler::equal, ok); 5263 STOP(msg); 5264 bind(ok); 5265 if (!is_src2_reachable) { 5266 pop(rscratch1); 5267 } 5268 } 5269 } 5270 #endif 5271 5272 // Algorithm must match oop.inline.hpp encode_heap_oop. 5273 void MacroAssembler::encode_heap_oop(Register r) { 5274 #ifdef ASSERT 5275 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5276 #endif 5277 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5278 if (CompressedOops::base() == nullptr) { 5279 if (CompressedOops::shift() != 0) { 5280 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5281 shrq(r, LogMinObjAlignmentInBytes); 5282 } 5283 return; 5284 } 5285 testq(r, r); 5286 cmovq(Assembler::equal, r, r12_heapbase); 5287 subq(r, r12_heapbase); 5288 shrq(r, LogMinObjAlignmentInBytes); 5289 } 5290 5291 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5292 #ifdef ASSERT 5293 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5294 if (CheckCompressedOops) { 5295 Label ok; 5296 testq(r, r); 5297 jcc(Assembler::notEqual, ok); 5298 STOP("null oop passed to encode_heap_oop_not_null"); 5299 bind(ok); 5300 } 5301 #endif 5302 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5303 if (CompressedOops::base() != nullptr) { 5304 subq(r, r12_heapbase); 5305 } 5306 if (CompressedOops::shift() != 0) { 5307 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5308 shrq(r, LogMinObjAlignmentInBytes); 5309 } 5310 } 5311 5312 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5313 #ifdef ASSERT 5314 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5315 if (CheckCompressedOops) { 5316 Label ok; 5317 testq(src, src); 5318 jcc(Assembler::notEqual, ok); 5319 STOP("null oop passed to encode_heap_oop_not_null2"); 5320 bind(ok); 5321 } 5322 #endif 5323 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5324 if (dst != src) { 5325 movq(dst, src); 5326 } 5327 if (CompressedOops::base() != nullptr) { 5328 subq(dst, r12_heapbase); 5329 } 5330 if (CompressedOops::shift() != 0) { 5331 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5332 shrq(dst, LogMinObjAlignmentInBytes); 5333 } 5334 } 5335 5336 void MacroAssembler::decode_heap_oop(Register r) { 5337 #ifdef ASSERT 5338 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5339 #endif 5340 if (CompressedOops::base() == nullptr) { 5341 if (CompressedOops::shift() != 0) { 5342 assert 
(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5343 shlq(r, LogMinObjAlignmentInBytes); 5344 } 5345 } else { 5346 Label done; 5347 shlq(r, LogMinObjAlignmentInBytes); 5348 jccb(Assembler::equal, done); 5349 addq(r, r12_heapbase); 5350 bind(done); 5351 } 5352 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5353 } 5354 5355 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5356 // Note: it will change flags 5357 assert (UseCompressedOops, "should only be used for compressed headers"); 5358 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5359 // Cannot assert, unverified entry point counts instructions (see .ad file) 5360 // vtableStubs also counts instructions in pd_code_size_limit. 5361 // Also do not verify_oop as this is called by verify_oop. 5362 if (CompressedOops::shift() != 0) { 5363 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5364 shlq(r, LogMinObjAlignmentInBytes); 5365 if (CompressedOops::base() != nullptr) { 5366 addq(r, r12_heapbase); 5367 } 5368 } else { 5369 assert (CompressedOops::base() == nullptr, "sanity"); 5370 } 5371 } 5372 5373 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5374 // Note: it will change flags 5375 assert (UseCompressedOops, "should only be used for compressed headers"); 5376 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5377 // Cannot assert, unverified entry point counts instructions (see .ad file) 5378 // vtableStubs also counts instructions in pd_code_size_limit. 5379 // Also do not verify_oop as this is called by verify_oop. 5380 if (CompressedOops::shift() != 0) { 5381 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5382 if (LogMinObjAlignmentInBytes == Address::times_8) { 5383 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5384 } else { 5385 if (dst != src) { 5386 movq(dst, src); 5387 } 5388 shlq(dst, LogMinObjAlignmentInBytes); 5389 if (CompressedOops::base() != nullptr) { 5390 addq(dst, r12_heapbase); 5391 } 5392 } 5393 } else { 5394 assert (CompressedOops::base() == nullptr, "sanity"); 5395 if (dst != src) { 5396 movq(dst, src); 5397 } 5398 } 5399 } 5400 5401 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5402 BLOCK_COMMENT("encode_klass_not_null {"); 5403 assert_different_registers(r, tmp); 5404 if (CompressedKlassPointers::base() != nullptr) { 5405 if (AOTCodeCache::is_on_for_dump()) { 5406 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5407 } else { 5408 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5409 } 5410 subq(r, tmp); 5411 } 5412 if (CompressedKlassPointers::shift() != 0) { 5413 shrq(r, CompressedKlassPointers::shift()); 5414 } 5415 BLOCK_COMMENT("} encode_klass_not_null"); 5416 } 5417 5418 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5419 BLOCK_COMMENT("encode_and_move_klass_not_null {"); 5420 assert_different_registers(src, dst); 5421 if (CompressedKlassPointers::base() != nullptr) { 5422 movptr(dst, -(intptr_t)CompressedKlassPointers::base()); 5423 addq(dst, src); 5424 } else { 5425 movptr(dst, src); 5426 } 5427 if (CompressedKlassPointers::shift() != 0) { 5428 shrq(dst, CompressedKlassPointers::shift()); 5429 } 5430 BLOCK_COMMENT("} encode_and_move_klass_not_null"); 5431 } 5432 5433 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5434 BLOCK_COMMENT("decode_klass_not_null {"); 5435 assert_different_registers(r, 
tmp); 5436 // Note: it will change flags 5437 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5438 // Cannot assert, unverified entry point counts instructions (see .ad file) 5439 // vtableStubs also counts instructions in pd_code_size_limit. 5440 // Also do not verify_oop as this is called by verify_oop. 5441 if (CompressedKlassPointers::shift() != 0) { 5442 shlq(r, CompressedKlassPointers::shift()); 5443 } 5444 if (CompressedKlassPointers::base() != nullptr) { 5445 if (AOTCodeCache::is_on_for_dump()) { 5446 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5447 } else { 5448 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5449 } 5450 addq(r, tmp); 5451 } 5452 BLOCK_COMMENT("} decode_klass_not_null"); 5453 } 5454 5455 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5456 BLOCK_COMMENT("decode_and_move_klass_not_null {"); 5457 assert_different_registers(src, dst); 5458 // Note: it will change flags 5459 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5460 // Cannot assert, unverified entry point counts instructions (see .ad file) 5461 // vtableStubs also counts instructions in pd_code_size_limit. 5462 // Also do not verify_oop as this is called by verify_oop. 5463 5464 if (CompressedKlassPointers::base() == nullptr && 5465 CompressedKlassPointers::shift() == 0) { 5466 // The best case scenario is that there is no base or shift. Then it is already 5467 // a pointer that needs nothing but a register rename. 5468 movl(dst, src); 5469 } else { 5470 if (CompressedKlassPointers::shift() <= Address::times_8) { 5471 if (CompressedKlassPointers::base() != nullptr) { 5472 movptr(dst, (intptr_t)CompressedKlassPointers::base()); 5473 } else { 5474 xorq(dst, dst); 5475 } 5476 if (CompressedKlassPointers::shift() != 0) { 5477 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?"); 5478 leaq(dst, Address(dst, src, Address::times_8, 0)); 5479 } else { 5480 addq(dst, src); 5481 } 5482 } else { 5483 if (CompressedKlassPointers::base() != nullptr) { 5484 const intptr_t base_right_shifted = 5485 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5486 movptr(dst, base_right_shifted); 5487 } else { 5488 xorq(dst, dst); 5489 } 5490 addq(dst, src); 5491 shlq(dst, CompressedKlassPointers::shift()); 5492 } 5493 } 5494 BLOCK_COMMENT("} decode_and_move_klass_not_null"); 5495 } 5496 5497 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5498 assert (UseCompressedOops, "should only be used for compressed headers"); 5499 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5500 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5501 int oop_index = oop_recorder()->find_index(obj); 5502 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5503 mov_narrow_oop(dst, oop_index, rspec); 5504 } 5505 5506 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5507 assert (UseCompressedOops, "should only be used for compressed headers"); 5508 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5509 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5510 int oop_index = oop_recorder()->find_index(obj); 5511 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5512 mov_narrow_oop(dst, oop_index, rspec); 5513 } 5514 5515 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5516 assert 
(UseCompressedClassPointers, "should only be used for compressed headers"); 5517 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5518 int klass_index = oop_recorder()->find_index(k); 5519 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5520 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5521 } 5522 5523 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 5524 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5525 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5526 int klass_index = oop_recorder()->find_index(k); 5527 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5528 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5529 } 5530 5531 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 5532 assert (UseCompressedOops, "should only be used for compressed headers"); 5533 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5534 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5535 int oop_index = oop_recorder()->find_index(obj); 5536 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5537 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5538 } 5539 5540 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 5541 assert (UseCompressedOops, "should only be used for compressed headers"); 5542 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5543 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5544 int oop_index = oop_recorder()->find_index(obj); 5545 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5546 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5547 } 5548 5549 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 5550 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5551 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5552 int klass_index = oop_recorder()->find_index(k); 5553 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5554 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5555 } 5556 5557 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 5558 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5559 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5560 int klass_index = oop_recorder()->find_index(k); 5561 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5562 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5563 } 5564 5565 void MacroAssembler::reinit_heapbase() { 5566 if (UseCompressedOops) { 5567 if (Universe::heap() != nullptr) { 5568 if (CompressedOops::base() == nullptr) { 5569 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 5570 } else { 5571 mov64(r12_heapbase, (int64_t)CompressedOops::base()); 5572 } 5573 } else { 5574 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 5575 } 5576 } 5577 } 5578 5579 #if COMPILER2_OR_JVMCI 5580 5581 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 5582 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5583 // cnt - number of qwords (8-byte words). 5584 // base - start address, qword aligned. 
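// Strategy: zero xtmp with the widest available vector, clear 64 bytes per loop iteration, then
// handle the sub-64-byte tail either with a masked 64-byte store (AVX-512 path) or with
// 32/16/8-byte stores. For example, cnt == 19 qwords takes two 64-byte iterations (16 qwords)
// plus, on the AVX-512 path, a masked store of the remaining 3 qwords.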
5585 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 5586 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 5587 if (use64byteVector) { 5588 vpxor(xtmp, xtmp, xtmp, AVX_512bit); 5589 } else if (MaxVectorSize >= 32) { 5590 vpxor(xtmp, xtmp, xtmp, AVX_256bit); 5591 } else { 5592 pxor(xtmp, xtmp); 5593 } 5594 jmp(L_zero_64_bytes); 5595 5596 BIND(L_loop); 5597 if (MaxVectorSize >= 32) { 5598 fill64(base, 0, xtmp, use64byteVector); 5599 } else { 5600 movdqu(Address(base, 0), xtmp); 5601 movdqu(Address(base, 16), xtmp); 5602 movdqu(Address(base, 32), xtmp); 5603 movdqu(Address(base, 48), xtmp); 5604 } 5605 addptr(base, 64); 5606 5607 BIND(L_zero_64_bytes); 5608 subptr(cnt, 8); 5609 jccb(Assembler::greaterEqual, L_loop); 5610 5611 // Copy trailing 64 bytes 5612 if (use64byteVector) { 5613 addptr(cnt, 8); 5614 jccb(Assembler::equal, L_end); 5615 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true); 5616 jmp(L_end); 5617 } else { 5618 addptr(cnt, 4); 5619 jccb(Assembler::less, L_tail); 5620 if (MaxVectorSize >= 32) { 5621 vmovdqu(Address(base, 0), xtmp); 5622 } else { 5623 movdqu(Address(base, 0), xtmp); 5624 movdqu(Address(base, 16), xtmp); 5625 } 5626 } 5627 addptr(base, 32); 5628 subptr(cnt, 4); 5629 5630 BIND(L_tail); 5631 addptr(cnt, 4); 5632 jccb(Assembler::lessEqual, L_end); 5633 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 5634 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp); 5635 } else { 5636 decrement(cnt); 5637 5638 BIND(L_sloop); 5639 movq(Address(base, 0), xtmp); 5640 addptr(base, 8); 5641 decrement(cnt); 5642 jccb(Assembler::greaterEqual, L_sloop); 5643 } 5644 BIND(L_end); 5645 } 5646 5647 // Clearing constant sized memory using YMM/ZMM registers. 5648 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5649 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); 5650 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 5651 5652 int vector64_count = (cnt & (~0x7)) >> 3; 5653 cnt = cnt & 0x7; 5654 const int fill64_per_loop = 4; 5655 const int max_unrolled_fill64 = 8; 5656 5657 // 64 byte initialization loop. 5658 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 5659 int start64 = 0; 5660 if (vector64_count > max_unrolled_fill64) { 5661 Label LOOP; 5662 Register index = rtmp; 5663 5664 start64 = vector64_count - (vector64_count % fill64_per_loop); 5665 5666 movl(index, 0); 5667 BIND(LOOP); 5668 for (int i = 0; i < fill64_per_loop; i++) { 5669 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 5670 } 5671 addl(index, fill64_per_loop * 64); 5672 cmpl(index, start64 * 64); 5673 jccb(Assembler::less, LOOP); 5674 } 5675 for (int i = start64; i < vector64_count; i++) { 5676 fill64(base, i * 64, xtmp, use64byteVector); 5677 } 5678 5679 // Clear remaining 64 byte tail. 
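// cnt now holds the 0..7 leftover qwords; each case below uses the narrowest store that covers
// exactly cnt * 8 bytes (a plain movq, a 128/256-bit evmovdqu, or a kmask-predicated store).
// For example, an original cnt of 11 leaves vector64_count == 1 and cnt == 3, which is handled
// by case 3: a 256-bit store masked to 3 qwords.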
5680 int disp = vector64_count * 64; 5681 if (cnt) { 5682 switch (cnt) { 5683 case 1: 5684 movq(Address(base, disp), xtmp); 5685 break; 5686 case 2: 5687 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 5688 break; 5689 case 3: 5690 movl(rtmp, 0x7); 5691 kmovwl(mask, rtmp); 5692 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 5693 break; 5694 case 4: 5695 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5696 break; 5697 case 5: 5698 if (use64byteVector) { 5699 movl(rtmp, 0x1F); 5700 kmovwl(mask, rtmp); 5701 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5702 } else { 5703 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5704 movq(Address(base, disp + 32), xtmp); 5705 } 5706 break; 5707 case 6: 5708 if (use64byteVector) { 5709 movl(rtmp, 0x3F); 5710 kmovwl(mask, rtmp); 5711 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5712 } else { 5713 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5714 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 5715 } 5716 break; 5717 case 7: 5718 if (use64byteVector) { 5719 movl(rtmp, 0x7F); 5720 kmovwl(mask, rtmp); 5721 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5722 } else { 5723 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5724 movl(rtmp, 0x7); 5725 kmovwl(mask, rtmp); 5726 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 5727 } 5728 break; 5729 default: 5730 fatal("Unexpected length : %d\n",cnt); 5731 break; 5732 } 5733 } 5734 } 5735 5736 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, 5737 bool is_large, KRegister mask) { 5738 // cnt - number of qwords (8-byte words). 5739 // base - start address, qword aligned. 
5740 // is_large - if optimizers know cnt is larger than InitArrayShortSize 5741 assert(base==rdi, "base register must be edi for rep stos"); 5742 assert(tmp==rax, "tmp register must be eax for rep stos"); 5743 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 5744 assert(InitArrayShortSize % BytesPerLong == 0, 5745 "InitArrayShortSize should be the multiple of BytesPerLong"); 5746 5747 Label DONE; 5748 if (!is_large || !UseXMMForObjInit) { 5749 xorptr(tmp, tmp); 5750 } 5751 5752 if (!is_large) { 5753 Label LOOP, LONG; 5754 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 5755 jccb(Assembler::greater, LONG); 5756 5757 decrement(cnt); 5758 jccb(Assembler::negative, DONE); // Zero length 5759 5760 // Use individual pointer-sized stores for small counts: 5761 BIND(LOOP); 5762 movptr(Address(base, cnt, Address::times_ptr), tmp); 5763 decrement(cnt); 5764 jccb(Assembler::greaterEqual, LOOP); 5765 jmpb(DONE); 5766 5767 BIND(LONG); 5768 } 5769 5770 // Use longer rep-prefixed ops for non-small counts: 5771 if (UseFastStosb) { 5772 shlptr(cnt, 3); // convert to number of bytes 5773 rep_stosb(); 5774 } else if (UseXMMForObjInit) { 5775 xmm_clear_mem(base, cnt, tmp, xtmp, mask); 5776 } else { 5777 rep_stos(); 5778 } 5779 5780 BIND(DONE); 5781 } 5782 5783 #endif //COMPILER2_OR_JVMCI 5784 5785 5786 void MacroAssembler::generate_fill(BasicType t, bool aligned, 5787 Register to, Register value, Register count, 5788 Register rtmp, XMMRegister xtmp) { 5789 ShortBranchVerifier sbv(this); 5790 assert_different_registers(to, value, count, rtmp); 5791 Label L_exit; 5792 Label L_fill_2_bytes, L_fill_4_bytes; 5793 5794 #if defined(COMPILER2) 5795 if(MaxVectorSize >=32 && 5796 VM_Version::supports_avx512vlbw() && 5797 VM_Version::supports_bmi2()) { 5798 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 5799 return; 5800 } 5801 #endif 5802 5803 int shift = -1; 5804 switch (t) { 5805 case T_BYTE: 5806 shift = 2; 5807 break; 5808 case T_SHORT: 5809 shift = 1; 5810 break; 5811 case T_INT: 5812 shift = 0; 5813 break; 5814 default: ShouldNotReachHere(); 5815 } 5816 5817 if (t == T_BYTE) { 5818 andl(value, 0xff); 5819 movl(rtmp, value); 5820 shll(rtmp, 8); 5821 orl(value, rtmp); 5822 } 5823 if (t == T_SHORT) { 5824 andl(value, 0xffff); 5825 } 5826 if (t == T_BYTE || t == T_SHORT) { 5827 movl(rtmp, value); 5828 shll(rtmp, 16); 5829 orl(value, rtmp); 5830 } 5831 5832 cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 5833 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 5834 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 5835 Label L_skip_align2; 5836 // align source address at 4 bytes address boundary 5837 if (t == T_BYTE) { 5838 Label L_skip_align1; 5839 // One byte misalignment happens only for byte arrays 5840 testptr(to, 1); 5841 jccb(Assembler::zero, L_skip_align1); 5842 movb(Address(to, 0), value); 5843 increment(to); 5844 decrement(count); 5845 BIND(L_skip_align1); 5846 } 5847 // Two bytes misalignment happens only for byte and short (char) arrays 5848 testptr(to, 2); 5849 jccb(Assembler::zero, L_skip_align2); 5850 movw(Address(to, 0), value); 5851 addptr(to, 2); 5852 subptr(count, 1<<(shift-1)); 5853 BIND(L_skip_align2); 5854 } 5855 { 5856 Label L_fill_32_bytes; 5857 if (!UseUnalignedLoadStores) { 5858 // align to 8 bytes, we know we are 4 byte aligned to start 5859 testptr(to, 4); 5860 jccb(Assembler::zero, L_fill_32_bytes); 5861 movl(Address(to, 0), value); 5862 addptr(to, 4); 5863 subptr(count, 1<<shift); 5864 } 5865 BIND(L_fill_32_bytes); 5866 
{ 5867 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 5868 movdl(xtmp, value); 5869 if (UseAVX >= 2 && UseUnalignedLoadStores) { 5870 Label L_check_fill_32_bytes; 5871 if (UseAVX > 2) { 5872 // Fill 64-byte chunks 5873 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 5874 5875 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 5876 cmpptr(count, VM_Version::avx3_threshold()); 5877 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 5878 5879 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 5880 5881 subptr(count, 16 << shift); 5882 jccb(Assembler::less, L_check_fill_32_bytes); 5883 align(16); 5884 5885 BIND(L_fill_64_bytes_loop_avx3); 5886 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 5887 addptr(to, 64); 5888 subptr(count, 16 << shift); 5889 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 5890 jmpb(L_check_fill_32_bytes); 5891 5892 BIND(L_check_fill_64_bytes_avx2); 5893 } 5894 // Fill 64-byte chunks 5895 Label L_fill_64_bytes_loop; 5896 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 5897 5898 subptr(count, 16 << shift); 5899 jcc(Assembler::less, L_check_fill_32_bytes); 5900 align(16); 5901 5902 BIND(L_fill_64_bytes_loop); 5903 vmovdqu(Address(to, 0), xtmp); 5904 vmovdqu(Address(to, 32), xtmp); 5905 addptr(to, 64); 5906 subptr(count, 16 << shift); 5907 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 5908 5909 BIND(L_check_fill_32_bytes); 5910 addptr(count, 8 << shift); 5911 jccb(Assembler::less, L_check_fill_8_bytes); 5912 vmovdqu(Address(to, 0), xtmp); 5913 addptr(to, 32); 5914 subptr(count, 8 << shift); 5915 5916 BIND(L_check_fill_8_bytes); 5917 // clean upper bits of YMM registers 5918 movdl(xtmp, value); 5919 pshufd(xtmp, xtmp, 0); 5920 } else { 5921 // Fill 32-byte chunks 5922 pshufd(xtmp, xtmp, 0); 5923 5924 subptr(count, 8 << shift); 5925 jcc(Assembler::less, L_check_fill_8_bytes); 5926 align(16); 5927 5928 BIND(L_fill_32_bytes_loop); 5929 5930 if (UseUnalignedLoadStores) { 5931 movdqu(Address(to, 0), xtmp); 5932 movdqu(Address(to, 16), xtmp); 5933 } else { 5934 movq(Address(to, 0), xtmp); 5935 movq(Address(to, 8), xtmp); 5936 movq(Address(to, 16), xtmp); 5937 movq(Address(to, 24), xtmp); 5938 } 5939 5940 addptr(to, 32); 5941 subptr(count, 8 << shift); 5942 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 5943 5944 BIND(L_check_fill_8_bytes); 5945 } 5946 addptr(count, 8 << shift); 5947 jccb(Assembler::zero, L_exit); 5948 jmpb(L_fill_8_bytes); 5949 5950 // 5951 // length is too short, just fill qwords 5952 // 5953 BIND(L_fill_8_bytes_loop); 5954 movq(Address(to, 0), xtmp); 5955 addptr(to, 8); 5956 BIND(L_fill_8_bytes); 5957 subptr(count, 1 << (shift + 1)); 5958 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 5959 } 5960 } 5961 // fill trailing 4 bytes 5962 BIND(L_fill_4_bytes); 5963 testl(count, 1<<shift); 5964 jccb(Assembler::zero, L_fill_2_bytes); 5965 movl(Address(to, 0), value); 5966 if (t == T_BYTE || t == T_SHORT) { 5967 Label L_fill_byte; 5968 addptr(to, 4); 5969 BIND(L_fill_2_bytes); 5970 // fill trailing 2 bytes 5971 testl(count, 1<<(shift-1)); 5972 jccb(Assembler::zero, L_fill_byte); 5973 movw(Address(to, 0), value); 5974 if (t == T_BYTE) { 5975 addptr(to, 2); 5976 BIND(L_fill_byte); 5977 // fill trailing byte 5978 testl(count, 1); 5979 jccb(Assembler::zero, L_exit); 5980 movb(Address(to, 0), value); 5981 } else { 5982 BIND(L_fill_byte); 5983 } 5984 } else { 5985 BIND(L_fill_2_bytes); 5986 } 5987 BIND(L_exit); 5988 } 5989 5990 void 
MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 5991 switch(type) { 5992 case T_BYTE: 5993 case T_BOOLEAN: 5994 evpbroadcastb(dst, src, vector_len); 5995 break; 5996 case T_SHORT: 5997 case T_CHAR: 5998 evpbroadcastw(dst, src, vector_len); 5999 break; 6000 case T_INT: 6001 case T_FLOAT: 6002 evpbroadcastd(dst, src, vector_len); 6003 break; 6004 case T_LONG: 6005 case T_DOUBLE: 6006 evpbroadcastq(dst, src, vector_len); 6007 break; 6008 default: 6009 fatal("Unhandled type : %s", type2name(type)); 6010 break; 6011 } 6012 } 6013 6014 // encode char[] to byte[] in ISO_8859_1 or ASCII 6015 //@IntrinsicCandidate 6016 //private static int implEncodeISOArray(byte[] sa, int sp, 6017 //byte[] da, int dp, int len) { 6018 // int i = 0; 6019 // for (; i < len; i++) { 6020 // char c = StringUTF16.getChar(sa, sp++); 6021 // if (c > '\u00FF') 6022 // break; 6023 // da[dp++] = (byte)c; 6024 // } 6025 // return i; 6026 //} 6027 // 6028 //@IntrinsicCandidate 6029 //private static int implEncodeAsciiArray(char[] sa, int sp, 6030 // byte[] da, int dp, int len) { 6031 // int i = 0; 6032 // for (; i < len; i++) { 6033 // char c = sa[sp++]; 6034 // if (c >= '\u0080') 6035 // break; 6036 // da[dp++] = (byte)c; 6037 // } 6038 // return i; 6039 //} 6040 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6041 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6042 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6043 Register tmp5, Register result, bool ascii) { 6044 6045 // rsi: src 6046 // rdi: dst 6047 // rdx: len 6048 // rcx: tmp5 6049 // rax: result 6050 ShortBranchVerifier sbv(this); 6051 assert_different_registers(src, dst, len, tmp5, result); 6052 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6053 6054 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6055 int short_mask = ascii ? 
0xff80 : 0xff00; 6056 6057 // set result 6058 xorl(result, result); 6059 // check for zero length 6060 testl(len, len); 6061 jcc(Assembler::zero, L_done); 6062 6063 movl(result, len); 6064 6065 // Setup pointers 6066 lea(src, Address(src, len, Address::times_2)); // char[] 6067 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6068 negptr(len); 6069 6070 if (UseSSE42Intrinsics || UseAVX >= 2) { 6071 Label L_copy_8_chars, L_copy_8_chars_exit; 6072 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 6073 6074 if (UseAVX >= 2) { 6075 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 6076 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6077 movdl(tmp1Reg, tmp5); 6078 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 6079 jmp(L_chars_32_check); 6080 6081 bind(L_copy_32_chars); 6082 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 6083 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 6084 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6085 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6086 jccb(Assembler::notZero, L_copy_32_chars_exit); 6087 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6088 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 6089 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 6090 6091 bind(L_chars_32_check); 6092 addptr(len, 32); 6093 jcc(Assembler::lessEqual, L_copy_32_chars); 6094 6095 bind(L_copy_32_chars_exit); 6096 subptr(len, 16); 6097 jccb(Assembler::greater, L_copy_16_chars_exit); 6098 6099 } else if (UseSSE42Intrinsics) { 6100 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6101 movdl(tmp1Reg, tmp5); 6102 pshufd(tmp1Reg, tmp1Reg, 0); 6103 jmpb(L_chars_16_check); 6104 } 6105 6106 bind(L_copy_16_chars); 6107 if (UseAVX >= 2) { 6108 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 6109 vptest(tmp2Reg, tmp1Reg); 6110 jcc(Assembler::notZero, L_copy_16_chars_exit); 6111 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 6112 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 6113 } else { 6114 if (UseAVX > 0) { 6115 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6116 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6117 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 6118 } else { 6119 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6120 por(tmp2Reg, tmp3Reg); 6121 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6122 por(tmp2Reg, tmp4Reg); 6123 } 6124 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6125 jccb(Assembler::notZero, L_copy_16_chars_exit); 6126 packuswb(tmp3Reg, tmp4Reg); 6127 } 6128 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 6129 6130 bind(L_chars_16_check); 6131 addptr(len, 16); 6132 jcc(Assembler::lessEqual, L_copy_16_chars); 6133 6134 bind(L_copy_16_chars_exit); 6135 if (UseAVX >= 2) { 6136 // clean upper bits of YMM registers 6137 vpxor(tmp2Reg, tmp2Reg); 6138 vpxor(tmp3Reg, tmp3Reg); 6139 vpxor(tmp4Reg, tmp4Reg); 6140 movdl(tmp1Reg, tmp5); 6141 pshufd(tmp1Reg, tmp1Reg, 0); 6142 } 6143 subptr(len, 8); 6144 jccb(Assembler::greater, L_copy_8_chars_exit); 6145 6146 bind(L_copy_8_chars); 6147 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 6148 ptest(tmp3Reg, tmp1Reg); 6149 jccb(Assembler::notZero, L_copy_8_chars_exit); 6150 packuswb(tmp3Reg, tmp1Reg); 6151 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 6152 addptr(len, 8); 6153 
jccb(Assembler::lessEqual, L_copy_8_chars); 6154 6155 bind(L_copy_8_chars_exit); 6156 subptr(len, 8); 6157 jccb(Assembler::zero, L_done); 6158 } 6159 6160 bind(L_copy_1_char); 6161 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 6162 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 6163 jccb(Assembler::notZero, L_copy_1_char_exit); 6164 movb(Address(dst, len, Address::times_1, 0), tmp5); 6165 addptr(len, 1); 6166 jccb(Assembler::less, L_copy_1_char); 6167 6168 bind(L_copy_1_char_exit); 6169 addptr(result, len); // len is negative count of not processed elements 6170 6171 bind(L_done); 6172 } 6173 6174 /** 6175 * Helper for multiply_to_len(). 6176 */ 6177 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 6178 addq(dest_lo, src1); 6179 adcq(dest_hi, 0); 6180 addq(dest_lo, src2); 6181 adcq(dest_hi, 0); 6182 } 6183 6184 /** 6185 * Multiply 64 bit by 64 bit first loop. 6186 */ 6187 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 6188 Register y, Register y_idx, Register z, 6189 Register carry, Register product, 6190 Register idx, Register kdx) { 6191 // 6192 // jlong carry, x[], y[], z[]; 6193 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6194 // huge_128 product = y[idx] * x[xstart] + carry; 6195 // z[kdx] = (jlong)product; 6196 // carry = (jlong)(product >>> 64); 6197 // } 6198 // z[xstart] = carry; 6199 // 6200 6201 Label L_first_loop, L_first_loop_exit; 6202 Label L_one_x, L_one_y, L_multiply; 6203 6204 decrementl(xstart); 6205 jcc(Assembler::negative, L_one_x); 6206 6207 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6208 rorq(x_xstart, 32); // convert big-endian to little-endian 6209 6210 bind(L_first_loop); 6211 decrementl(idx); 6212 jcc(Assembler::negative, L_first_loop_exit); 6213 decrementl(idx); 6214 jcc(Assembler::negative, L_one_y); 6215 movq(y_idx, Address(y, idx, Address::times_4, 0)); 6216 rorq(y_idx, 32); // convert big-endian to little-endian 6217 bind(L_multiply); 6218 movq(product, x_xstart); 6219 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 6220 addq(product, carry); 6221 adcq(rdx, 0); 6222 subl(kdx, 2); 6223 movl(Address(z, kdx, Address::times_4, 4), product); 6224 shrq(product, 32); 6225 movl(Address(z, kdx, Address::times_4, 0), product); 6226 movq(carry, rdx); 6227 jmp(L_first_loop); 6228 6229 bind(L_one_y); 6230 movl(y_idx, Address(y, 0)); 6231 jmp(L_multiply); 6232 6233 bind(L_one_x); 6234 movl(x_xstart, Address(x, 0)); 6235 jmp(L_first_loop); 6236 6237 bind(L_first_loop_exit); 6238 } 6239 6240 /** 6241 * Multiply 64 bit by 64 bit and add 128 bit. 
6242 */ 6243 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6244 Register yz_idx, Register idx, 6245 Register carry, Register product, int offset) { 6246 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6247 // z[kdx] = (jlong)product; 6248 6249 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6250 rorq(yz_idx, 32); // convert big-endian to little-endian 6251 movq(product, x_xstart); 6252 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6253 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6254 rorq(yz_idx, 32); // convert big-endian to little-endian 6255 6256 add2_with_carry(rdx, product, carry, yz_idx); 6257 6258 movl(Address(z, idx, Address::times_4, offset+4), product); 6259 shrq(product, 32); 6260 movl(Address(z, idx, Address::times_4, offset), product); 6261 6262 } 6263 6264 /** 6265 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6266 */ 6267 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6268 Register yz_idx, Register idx, Register jdx, 6269 Register carry, Register product, 6270 Register carry2) { 6271 // jlong carry, x[], y[], z[]; 6272 // int kdx = ystart+1; 6273 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6274 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6275 // z[kdx+idx+1] = (jlong)product; 6276 // jlong carry2 = (jlong)(product >>> 64); 6277 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6278 // z[kdx+idx] = (jlong)product; 6279 // carry = (jlong)(product >>> 64); 6280 // } 6281 // idx += 2; 6282 // if (idx > 0) { 6283 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6284 // z[kdx+idx] = (jlong)product; 6285 // carry = (jlong)(product >>> 64); 6286 // } 6287 // 6288 6289 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6290 6291 movl(jdx, idx); 6292 andl(jdx, 0xFFFFFFFC); 6293 shrl(jdx, 2); 6294 6295 bind(L_third_loop); 6296 subl(jdx, 1); 6297 jcc(Assembler::negative, L_third_loop_exit); 6298 subl(idx, 4); 6299 6300 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6301 movq(carry2, rdx); 6302 6303 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6304 movq(carry, rdx); 6305 jmp(L_third_loop); 6306 6307 bind (L_third_loop_exit); 6308 6309 andl (idx, 0x3); 6310 jcc(Assembler::zero, L_post_third_loop_done); 6311 6312 Label L_check_1; 6313 subl(idx, 2); 6314 jcc(Assembler::negative, L_check_1); 6315 6316 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6317 movq(carry, rdx); 6318 6319 bind (L_check_1); 6320 addl (idx, 0x2); 6321 andl (idx, 0x1); 6322 subl(idx, 1); 6323 jcc(Assembler::negative, L_post_third_loop_done); 6324 6325 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6326 movq(product, x_xstart); 6327 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6328 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6329 6330 add2_with_carry(rdx, product, yz_idx, carry); 6331 6332 movl(Address(z, idx, Address::times_4, 0), product); 6333 shrq(product, 32); 6334 6335 shlq(rdx, 32); 6336 orq(product, rdx); 6337 movq(carry, product); 6338 6339 bind(L_post_third_loop_done); 6340 } 6341 6342 /** 6343 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
6344 * 6345 */ 6346 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6347 Register carry, Register carry2, 6348 Register idx, Register jdx, 6349 Register yz_idx1, Register yz_idx2, 6350 Register tmp, Register tmp3, Register tmp4) { 6351 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6352 6353 // jlong carry, x[], y[], z[]; 6354 // int kdx = ystart+1; 6355 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6356 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6357 // jlong carry2 = (jlong)(tmp3 >>> 64); 6358 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6359 // carry = (jlong)(tmp4 >>> 64); 6360 // z[kdx+idx+1] = (jlong)tmp3; 6361 // z[kdx+idx] = (jlong)tmp4; 6362 // } 6363 // idx += 2; 6364 // if (idx > 0) { 6365 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6366 // z[kdx+idx] = (jlong)yz_idx1; 6367 // carry = (jlong)(yz_idx1 >>> 64); 6368 // } 6369 // 6370 6371 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6372 6373 movl(jdx, idx); 6374 andl(jdx, 0xFFFFFFFC); 6375 shrl(jdx, 2); 6376 6377 bind(L_third_loop); 6378 subl(jdx, 1); 6379 jcc(Assembler::negative, L_third_loop_exit); 6380 subl(idx, 4); 6381 6382 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6383 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6384 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6385 rorxq(yz_idx2, yz_idx2, 32); 6386 6387 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6388 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6389 6390 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6391 rorxq(yz_idx1, yz_idx1, 32); 6392 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6393 rorxq(yz_idx2, yz_idx2, 32); 6394 6395 if (VM_Version::supports_adx()) { 6396 adcxq(tmp3, carry); 6397 adoxq(tmp3, yz_idx1); 6398 6399 adcxq(tmp4, tmp); 6400 adoxq(tmp4, yz_idx2); 6401 6402 movl(carry, 0); // does not affect flags 6403 adcxq(carry2, carry); 6404 adoxq(carry2, carry); 6405 } else { 6406 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6407 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6408 } 6409 movq(carry, carry2); 6410 6411 movl(Address(z, idx, Address::times_4, 12), tmp3); 6412 shrq(tmp3, 32); 6413 movl(Address(z, idx, Address::times_4, 8), tmp3); 6414 6415 movl(Address(z, idx, Address::times_4, 4), tmp4); 6416 shrq(tmp4, 32); 6417 movl(Address(z, idx, Address::times_4, 0), tmp4); 6418 6419 jmp(L_third_loop); 6420 6421 bind (L_third_loop_exit); 6422 6423 andl (idx, 0x3); 6424 jcc(Assembler::zero, L_post_third_loop_done); 6425 6426 Label L_check_1; 6427 subl(idx, 2); 6428 jcc(Assembler::negative, L_check_1); 6429 6430 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6431 rorxq(yz_idx1, yz_idx1, 32); 6432 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6433 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6434 rorxq(yz_idx2, yz_idx2, 32); 6435 6436 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6437 6438 movl(Address(z, idx, Address::times_4, 4), tmp3); 6439 shrq(tmp3, 32); 6440 movl(Address(z, idx, Address::times_4, 0), tmp3); 6441 movq(carry, tmp4); 6442 6443 bind (L_check_1); 6444 addl (idx, 0x2); 6445 andl (idx, 0x1); 6446 subl(idx, 1); 6447 jcc(Assembler::negative, L_post_third_loop_done); 6448 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6449 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6450 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6451 6452 add2_with_carry(carry2, tmp3, tmp4, carry); 6453 6454 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 6455 shrq(tmp3, 32); 6456 6457 shlq(carry2, 32); 6458 orq(tmp3, carry2); 6459 movq(carry, tmp3); 6460 6461 bind(L_post_third_loop_done); 6462 } 6463 6464 /** 6465 * Code for BigInteger::multiplyToLen() intrinsic. 6466 * 6467 * rdi: x 6468 * rax: xlen 6469 * rsi: y 6470 * rcx: ylen 6471 * r8: z 6472 * r11: tmp0 6473 * r12: tmp1 6474 * r13: tmp2 6475 * r14: tmp3 6476 * r15: tmp4 6477 * rbx: tmp5 6478 * 6479 */ 6480 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 6481 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 6482 ShortBranchVerifier sbv(this); 6483 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 6484 6485 push(tmp0); 6486 push(tmp1); 6487 push(tmp2); 6488 push(tmp3); 6489 push(tmp4); 6490 push(tmp5); 6491 6492 push(xlen); 6493 6494 const Register idx = tmp1; 6495 const Register kdx = tmp2; 6496 const Register xstart = tmp3; 6497 6498 const Register y_idx = tmp4; 6499 const Register carry = tmp5; 6500 const Register product = xlen; 6501 const Register x_xstart = tmp0; 6502 6503 // First Loop. 6504 // 6505 // final static long LONG_MASK = 0xffffffffL; 6506 // int xstart = xlen - 1; 6507 // int ystart = ylen - 1; 6508 // long carry = 0; 6509 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6510 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6511 // z[kdx] = (int)product; 6512 // carry = product >>> 32; 6513 // } 6514 // z[xstart] = (int)carry; 6515 // 6516 6517 movl(idx, ylen); // idx = ylen; 6518 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen; 6519 xorq(carry, carry); // carry = 0; 6520 6521 Label L_done; 6522 6523 movl(xstart, xlen); 6524 decrementl(xstart); 6525 jcc(Assembler::negative, L_done); 6526 6527 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6528 6529 Label L_second_loop; 6530 testl(kdx, kdx); 6531 jcc(Assembler::zero, L_second_loop); 6532 6533 Label L_carry; 6534 subl(kdx, 1); 6535 jcc(Assembler::zero, L_carry); 6536 6537 movl(Address(z, kdx, Address::times_4, 0), carry); 6538 shrq(carry, 32); 6539 subl(kdx, 1); 6540 6541 bind(L_carry); 6542 movl(Address(z, kdx, Address::times_4, 0), carry); 6543 6544 // Second and third (nested) loops. 
6545 // 6546 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6547 // carry = 0; 6548 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6549 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6550 // (z[k] & LONG_MASK) + carry; 6551 // z[k] = (int)product; 6552 // carry = product >>> 32; 6553 // } 6554 // z[i] = (int)carry; 6555 // } 6556 // 6557 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 6558 6559 const Register jdx = tmp1; 6560 6561 bind(L_second_loop); 6562 xorl(carry, carry); // carry = 0; 6563 movl(jdx, ylen); // j = ystart+1 6564 6565 subl(xstart, 1); // i = xstart-1; 6566 jcc(Assembler::negative, L_done); 6567 6568 push (z); 6569 6570 Label L_last_x; 6571 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 6572 subl(xstart, 1); // i = xstart-1; 6573 jcc(Assembler::negative, L_last_x); 6574 6575 if (UseBMI2Instructions) { 6576 movq(rdx, Address(x, xstart, Address::times_4, 0)); 6577 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 6578 } else { 6579 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6580 rorq(x_xstart, 32); // convert big-endian to little-endian 6581 } 6582 6583 Label L_third_loop_prologue; 6584 bind(L_third_loop_prologue); 6585 6586 push (x); 6587 push (xstart); 6588 push (ylen); 6589 6590 6591 if (UseBMI2Instructions) { 6592 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 6593 } else { // !UseBMI2Instructions 6594 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6595 } 6596 6597 pop(ylen); 6598 pop(xlen); 6599 pop(x); 6600 pop(z); 6601 6602 movl(tmp3, xlen); 6603 addl(tmp3, 1); 6604 movl(Address(z, tmp3, Address::times_4, 0), carry); 6605 subl(tmp3, 1); 6606 jccb(Assembler::negative, L_done); 6607 6608 shrq(carry, 32); 6609 movl(Address(z, tmp3, Address::times_4, 0), carry); 6610 jmp(L_second_loop); 6611 6612 // Next infrequent code is moved outside loops. 6613 bind(L_last_x); 6614 if (UseBMI2Instructions) { 6615 movl(rdx, Address(x, 0)); 6616 } else { 6617 movl(x_xstart, Address(x, 0)); 6618 } 6619 jmp(L_third_loop_prologue); 6620 6621 bind(L_done); 6622 6623 pop(xlen); 6624 6625 pop(tmp5); 6626 pop(tmp4); 6627 pop(tmp3); 6628 pop(tmp2); 6629 pop(tmp1); 6630 pop(tmp0); 6631 } 6632 6633 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 6634 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 6635 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 6636 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 6637 Label VECTOR8_TAIL, VECTOR4_TAIL; 6638 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 6639 Label SAME_TILL_END, DONE; 6640 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 6641 6642 //scale is in rcx in both Win64 and Unix 6643 ShortBranchVerifier sbv(this); 6644 6645 shlq(length); 6646 xorq(result, result); 6647 6648 if ((AVX3Threshold == 0) && (UseAVX > 2) && 6649 VM_Version::supports_avx512vlbw()) { 6650 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 6651 6652 cmpq(length, 64); 6653 jcc(Assembler::less, VECTOR32_TAIL); 6654 6655 movq(tmp1, length); 6656 andq(tmp1, 0x3F); // tail count 6657 andq(length, ~(0x3F)); //vector count 6658 6659 bind(VECTOR64_LOOP); 6660 // AVX512 code to compare 64 byte vectors. 
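// evpcmpeqb sets one mask bit per byte that compares equal; kortestql sets CF only when the mask
// is all ones, so carryClear (aboveEqual) below means at least one of the 64 bytes differed.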
6661 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 6662 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 6663 kortestql(k7, k7); 6664 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 6665 addq(result, 64); 6666 subq(length, 64); 6667 jccb(Assembler::notZero, VECTOR64_LOOP); 6668 6669 //bind(VECTOR64_TAIL); 6670 testq(tmp1, tmp1); 6671 jcc(Assembler::zero, SAME_TILL_END); 6672 6673 //bind(VECTOR64_TAIL); 6674 // AVX512 code to compare up to 63 byte vectors. 6675 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 6676 shlxq(tmp2, tmp2, tmp1); 6677 notq(tmp2); 6678 kmovql(k3, tmp2); 6679 6680 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 6681 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 6682 6683 ktestql(k7, k3); 6684 jcc(Assembler::below, SAME_TILL_END); // not mismatch 6685 6686 bind(VECTOR64_NOT_EQUAL); 6687 kmovql(tmp1, k7); 6688 notq(tmp1); 6689 tzcntq(tmp1, tmp1); 6690 addq(result, tmp1); 6691 shrq(result); 6692 jmp(DONE); 6693 bind(VECTOR32_TAIL); 6694 } 6695 6696 cmpq(length, 8); 6697 jcc(Assembler::equal, VECTOR8_LOOP); 6698 jcc(Assembler::less, VECTOR4_TAIL); 6699 6700 if (UseAVX >= 2) { 6701 Label VECTOR16_TAIL, VECTOR32_LOOP; 6702 6703 cmpq(length, 16); 6704 jcc(Assembler::equal, VECTOR16_LOOP); 6705 jcc(Assembler::less, VECTOR8_LOOP); 6706 6707 cmpq(length, 32); 6708 jccb(Assembler::less, VECTOR16_TAIL); 6709 6710 subq(length, 32); 6711 bind(VECTOR32_LOOP); 6712 vmovdqu(rymm0, Address(obja, result)); 6713 vmovdqu(rymm1, Address(objb, result)); 6714 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 6715 vptest(rymm2, rymm2); 6716 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 6717 addq(result, 32); 6718 subq(length, 32); 6719 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 6720 addq(length, 32); 6721 jcc(Assembler::equal, SAME_TILL_END); 6722 //falling through if less than 32 bytes left //close the branch here. 
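// Fewer than 32 bytes remain: compare one 16-byte chunk with vpxor/ptest if at least 16 are
// left, then fall through to the 8-, 4- and 1-byte tails below.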
6723 6724 bind(VECTOR16_TAIL); 6725 cmpq(length, 16); 6726 jccb(Assembler::less, VECTOR8_TAIL); 6727 bind(VECTOR16_LOOP); 6728 movdqu(rymm0, Address(obja, result)); 6729 movdqu(rymm1, Address(objb, result)); 6730 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 6731 ptest(rymm2, rymm2); 6732 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6733 addq(result, 16); 6734 subq(length, 16); 6735 jcc(Assembler::equal, SAME_TILL_END); 6736 //falling through if less than 16 bytes left 6737 } else {//regular intrinsics 6738 6739 cmpq(length, 16); 6740 jccb(Assembler::less, VECTOR8_TAIL); 6741 6742 subq(length, 16); 6743 bind(VECTOR16_LOOP); 6744 movdqu(rymm0, Address(obja, result)); 6745 movdqu(rymm1, Address(objb, result)); 6746 pxor(rymm0, rymm1); 6747 ptest(rymm0, rymm0); 6748 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6749 addq(result, 16); 6750 subq(length, 16); 6751 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 6752 addq(length, 16); 6753 jcc(Assembler::equal, SAME_TILL_END); 6754 //falling through if less than 16 bytes left 6755 } 6756 6757 bind(VECTOR8_TAIL); 6758 cmpq(length, 8); 6759 jccb(Assembler::less, VECTOR4_TAIL); 6760 bind(VECTOR8_LOOP); 6761 movq(tmp1, Address(obja, result)); 6762 movq(tmp2, Address(objb, result)); 6763 xorq(tmp1, tmp2); 6764 testq(tmp1, tmp1); 6765 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 6766 addq(result, 8); 6767 subq(length, 8); 6768 jcc(Assembler::equal, SAME_TILL_END); 6769 //falling through if less than 8 bytes left 6770 6771 bind(VECTOR4_TAIL); 6772 cmpq(length, 4); 6773 jccb(Assembler::less, BYTES_TAIL); 6774 bind(VECTOR4_LOOP); 6775 movl(tmp1, Address(obja, result)); 6776 xorl(tmp1, Address(objb, result)); 6777 testl(tmp1, tmp1); 6778 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 6779 addq(result, 4); 6780 subq(length, 4); 6781 jcc(Assembler::equal, SAME_TILL_END); 6782 //falling through if less than 4 bytes left 6783 6784 bind(BYTES_TAIL); 6785 bind(BYTES_LOOP); 6786 load_unsigned_byte(tmp1, Address(obja, result)); 6787 load_unsigned_byte(tmp2, Address(objb, result)); 6788 xorl(tmp1, tmp2); 6789 testl(tmp1, tmp1); 6790 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6791 decq(length); 6792 jcc(Assembler::zero, SAME_TILL_END); 6793 incq(result); 6794 load_unsigned_byte(tmp1, Address(obja, result)); 6795 load_unsigned_byte(tmp2, Address(objb, result)); 6796 xorl(tmp1, tmp2); 6797 testl(tmp1, tmp1); 6798 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6799 decq(length); 6800 jcc(Assembler::zero, SAME_TILL_END); 6801 incq(result); 6802 load_unsigned_byte(tmp1, Address(obja, result)); 6803 load_unsigned_byte(tmp2, Address(objb, result)); 6804 xorl(tmp1, tmp2); 6805 testl(tmp1, tmp1); 6806 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6807 jmp(SAME_TILL_END); 6808 6809 if (UseAVX >= 2) { 6810 bind(VECTOR32_NOT_EQUAL); 6811 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 6812 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 6813 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 6814 vpmovmskb(tmp1, rymm0); 6815 bsfq(tmp1, tmp1); 6816 addq(result, tmp1); 6817 shrq(result); 6818 jmp(DONE); 6819 } 6820 6821 bind(VECTOR16_NOT_EQUAL); 6822 if (UseAVX >= 2) { 6823 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 6824 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 6825 pxor(rymm0, rymm2); 6826 } else { 6827 pcmpeqb(rymm2, rymm2); 6828 pxor(rymm0, rymm1); 6829 pcmpeqb(rymm0, rymm1); 6830 pxor(rymm0, rymm2); 6831 } 6832 pmovmskb(tmp1, rymm0); 6833 
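// tmp1 holds one bit per byte, set where the chunks differ; bsf finds the first differing
// byte and the shift by cl (the log2 element scale) turns the byte offset into an element index.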
bsfq(tmp1, tmp1); 6834 addq(result, tmp1); 6835 shrq(result); 6836 jmpb(DONE); 6837 6838 bind(VECTOR8_NOT_EQUAL); 6839 bind(VECTOR4_NOT_EQUAL); 6840 bsfq(tmp1, tmp1); 6841 shrq(tmp1, 3); 6842 addq(result, tmp1); 6843 bind(BYTES_NOT_EQUAL); 6844 shrq(result); 6845 jmpb(DONE); 6846 6847 bind(SAME_TILL_END); 6848 mov64(result, -1); 6849 6850 bind(DONE); 6851 } 6852 6853 //Helper functions for square_to_len() 6854 6855 /** 6856 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 6857 * Preserves x and z and modifies rest of the registers. 6858 */ 6859 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 6860 // Perform square and right shift by 1 6861 // Handle odd xlen case first, then for even xlen do the following 6862 // jlong carry = 0; 6863 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 6864 // huge_128 product = x[j:j+1] * x[j:j+1]; 6865 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 6866 // z[i+2:i+3] = (jlong)(product >>> 1); 6867 // carry = (jlong)product; 6868 // } 6869 6870 xorq(tmp5, tmp5); // carry 6871 xorq(rdxReg, rdxReg); 6872 xorl(tmp1, tmp1); // index for x 6873 xorl(tmp4, tmp4); // index for z 6874 6875 Label L_first_loop, L_first_loop_exit; 6876 6877 testl(xlen, 1); 6878 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 6879 6880 // Square and right shift by 1 the odd element using 32 bit multiply 6881 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 6882 imulq(raxReg, raxReg); 6883 shrq(raxReg, 1); 6884 adcq(tmp5, 0); 6885 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 6886 incrementl(tmp1); 6887 addl(tmp4, 2); 6888 6889 // Square and right shift by 1 the rest using 64 bit multiply 6890 bind(L_first_loop); 6891 cmpptr(tmp1, xlen); 6892 jccb(Assembler::equal, L_first_loop_exit); 6893 6894 // Square 6895 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 6896 rorq(raxReg, 32); // convert big-endian to little-endian 6897 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 6898 6899 // Right shift by 1 and save carry 6900 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 6901 rcrq(rdxReg, 1); 6902 rcrq(raxReg, 1); 6903 adcq(tmp5, 0); 6904 6905 // Store result in z 6906 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 6907 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 6908 6909 // Update indices for x and z 6910 addl(tmp1, 2); 6911 addl(tmp4, 4); 6912 jmp(L_first_loop); 6913 6914 bind(L_first_loop_exit); 6915 } 6916 6917 6918 /** 6919 * Perform the following multiply add operation using BMI2 instructions 6920 * carry:sum = sum + op1*op2 + carry 6921 * op2 should be in rdx 6922 * op2 is preserved, all other registers are modified 6923 */ 6924 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 6925 // assert op2 is rdx 6926 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 6927 addq(sum, carry); 6928 adcq(tmp2, 0); 6929 addq(sum, op1); 6930 adcq(tmp2, 0); 6931 movq(carry, tmp2); 6932 } 6933 6934 /** 6935 * Perform the following multiply add operation: 6936 * carry:sum = sum + op1*op2 + carry 6937 * Preserves op1, op2 and modifies rest of registers 6938 */ 6939 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 6940 // rdx:rax = op1 * op2 6941 movq(raxReg, op2); 6942 mulq(op1); 6943 6944 // rdx:rax = sum + carry + rdx:rax 6945 addq(sum, carry); 6946 
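// The adc instructions below fold the carry-outs of the two 64-bit additions into rdx,
// which holds the high half of the product.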
adcq(rdxReg, 0); 6947 addq(sum, raxReg); 6948 adcq(rdxReg, 0); 6949 6950 // carry:sum = rdx:sum 6951 movq(carry, rdxReg); 6952 } 6953 6954 /** 6955 * Add 64 bit long carry into z[] with carry propagation. 6956 * Preserves z and carry register values and modifies rest of registers. 6957 * 6958 */ 6959 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 6960 Label L_fourth_loop, L_fourth_loop_exit; 6961 6962 movl(tmp1, 1); 6963 subl(zlen, 2); 6964 addq(Address(z, zlen, Address::times_4, 0), carry); 6965 6966 bind(L_fourth_loop); 6967 jccb(Assembler::carryClear, L_fourth_loop_exit); 6968 subl(zlen, 2); 6969 jccb(Assembler::negative, L_fourth_loop_exit); 6970 addq(Address(z, zlen, Address::times_4, 0), tmp1); 6971 jmp(L_fourth_loop); 6972 bind(L_fourth_loop_exit); 6973 } 6974 6975 /** 6976 * Shift z[] left by 1 bit. 6977 * Preserves x, len, z and zlen registers and modifies rest of the registers. 6978 * 6979 */ 6980 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 6981 6982 Label L_fifth_loop, L_fifth_loop_exit; 6983 6984 // Fifth loop 6985 // Perform primitiveLeftShift(z, zlen, 1) 6986 6987 const Register prev_carry = tmp1; 6988 const Register new_carry = tmp4; 6989 const Register value = tmp2; 6990 const Register zidx = tmp3; 6991 6992 // int zidx, carry; 6993 // long value; 6994 // carry = 0; 6995 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 6996 // (carry:value) = (z[i] << 1) | carry ; 6997 // z[i] = value; 6998 // } 6999 7000 movl(zidx, zlen); 7001 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7002 7003 bind(L_fifth_loop); 7004 decl(zidx); // Use decl to preserve carry flag 7005 decl(zidx); 7006 jccb(Assembler::negative, L_fifth_loop_exit); 7007 7008 if (UseBMI2Instructions) { 7009 movq(value, Address(z, zidx, Address::times_4, 0)); 7010 rclq(value, 1); 7011 rorxq(value, value, 32); 7012 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7013 } 7014 else { 7015 // clear new_carry 7016 xorl(new_carry, new_carry); 7017 7018 // Shift z[i] by 1, or in previous carry and save new carry 7019 movq(value, Address(z, zidx, Address::times_4, 0)); 7020 shlq(value, 1); 7021 adcl(new_carry, 0); 7022 7023 orq(value, prev_carry); 7024 rorq(value, 0x20); 7025 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7026 7027 // Set previous carry = new carry 7028 movl(prev_carry, new_carry); 7029 } 7030 jmp(L_fifth_loop); 7031 7032 bind(L_fifth_loop_exit); 7033 } 7034 7035 7036 /** 7037 * Code for BigInteger::squareToLen() intrinsic 7038 * 7039 * rdi: x 7040 * rsi: len 7041 * r8: z 7042 * rcx: zlen 7043 * r12: tmp1 7044 * r13: tmp2 7045 * r14: tmp3 7046 * r15: tmp4 7047 * rbx: tmp5 7048 * 7049 */ 7050 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7051 7052 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7053 push(tmp1); 7054 push(tmp2); 7055 push(tmp3); 7056 push(tmp4); 7057 push(tmp5); 7058 7059 // First loop 7060 // Store the squares, right shifted one bit (i.e., divided by 2). 7061 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7062 7063 // Add in off-diagonal sums. 7064 // 7065 // Second, third (nested) and fourth loops. 
7066 // zlen +=2; 7067 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7068 // carry = 0; 7069 // long op2 = x[xidx:xidx+1]; 7070 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 7071 // k -= 2; 7072 // long op1 = x[j:j+1]; 7073 // long sum = z[k:k+1]; 7074 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 7075 // z[k:k+1] = sum; 7076 // } 7077 // add_one_64(z, k, carry, tmp_regs); 7078 // } 7079 7080 const Register carry = tmp5; 7081 const Register sum = tmp3; 7082 const Register op1 = tmp4; 7083 Register op2 = tmp2; 7084 7085 push(zlen); 7086 push(len); 7087 addl(zlen,2); 7088 bind(L_second_loop); 7089 xorq(carry, carry); 7090 subl(zlen, 4); 7091 subl(len, 2); 7092 push(zlen); 7093 push(len); 7094 cmpl(len, 0); 7095 jccb(Assembler::lessEqual, L_second_loop_exit); 7096 7097 // Multiply an array by one 64 bit long. 7098 if (UseBMI2Instructions) { 7099 op2 = rdxReg; 7100 movq(op2, Address(x, len, Address::times_4, 0)); 7101 rorxq(op2, op2, 32); 7102 } 7103 else { 7104 movq(op2, Address(x, len, Address::times_4, 0)); 7105 rorq(op2, 32); 7106 } 7107 7108 bind(L_third_loop); 7109 decrementl(len); 7110 jccb(Assembler::negative, L_third_loop_exit); 7111 decrementl(len); 7112 jccb(Assembler::negative, L_last_x); 7113 7114 movq(op1, Address(x, len, Address::times_4, 0)); 7115 rorq(op1, 32); 7116 7117 bind(L_multiply); 7118 subl(zlen, 2); 7119 movq(sum, Address(z, zlen, Address::times_4, 0)); 7120 7121 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 7122 if (UseBMI2Instructions) { 7123 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 7124 } 7125 else { 7126 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7127 } 7128 7129 movq(Address(z, zlen, Address::times_4, 0), sum); 7130 7131 jmp(L_third_loop); 7132 bind(L_third_loop_exit); 7133 7134 // Fourth loop 7135 // Add 64 bit long carry into z with carry propagation. 7136 // Uses offsetted zlen. 7137 add_one_64(z, zlen, carry, tmp1); 7138 7139 pop(len); 7140 pop(zlen); 7141 jmp(L_second_loop); 7142 7143 // Next infrequent code is moved outside loops. 7144 bind(L_last_x); 7145 movl(op1, Address(x, 0)); 7146 jmp(L_multiply); 7147 7148 bind(L_second_loop_exit); 7149 pop(len); 7150 pop(zlen); 7151 pop(len); 7152 pop(zlen); 7153 7154 // Fifth loop 7155 // Shift z left 1 bit. 7156 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 7157 7158 // z[zlen-1] |= x[len-1] & 1; 7159 movl(tmp3, Address(x, len, Address::times_4, -4)); 7160 andl(tmp3, 1); 7161 orl(Address(z, zlen, Address::times_4, -4), tmp3); 7162 7163 pop(tmp5); 7164 pop(tmp4); 7165 pop(tmp3); 7166 pop(tmp2); 7167 pop(tmp1); 7168 } 7169 7170 /** 7171 * Helper function for mul_add() 7172 * Multiply the in[] by int k and add to out[] starting at offset offs using 7173 * 128 bit by 32 bit multiply and return the carry in tmp5. 7174 * Only quad int aligned length of in[] is operated on in this function. 7175 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 7176 * This function preserves out, in and k registers. 7177 * len and offset point to the appropriate index in "in" & "out" correspondingly 7178 * tmp5 has the carry. 7179 * other registers are temporary and are modified. 
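 * Roughly, each iteration of the loop performs four steps of the scalar mulAdd recurrence
 * (sketch only, using the LONG_MASK idiom from the comments above):
 *   long product = (in[j] & LONG_MASK) * (k & LONG_MASK) + (out[offs] & LONG_MASK) + carry;
 *   out[offs--] = (int)product;  carry = product >>> 32;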
7180 * 7181 */ 7182 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7183 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7184 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7185 7186 Label L_first_loop, L_first_loop_exit; 7187 7188 movl(tmp1, len); 7189 shrl(tmp1, 2); 7190 7191 bind(L_first_loop); 7192 subl(tmp1, 1); 7193 jccb(Assembler::negative, L_first_loop_exit); 7194 7195 subl(len, 4); 7196 subl(offset, 4); 7197 7198 Register op2 = tmp2; 7199 const Register sum = tmp3; 7200 const Register op1 = tmp4; 7201 const Register carry = tmp5; 7202 7203 if (UseBMI2Instructions) { 7204 op2 = rdxReg; 7205 } 7206 7207 movq(op1, Address(in, len, Address::times_4, 8)); 7208 rorq(op1, 32); 7209 movq(sum, Address(out, offset, Address::times_4, 8)); 7210 rorq(sum, 32); 7211 if (UseBMI2Instructions) { 7212 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7213 } 7214 else { 7215 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7216 } 7217 // Store back in big endian from little endian 7218 rorq(sum, 0x20); 7219 movq(Address(out, offset, Address::times_4, 8), sum); 7220 7221 movq(op1, Address(in, len, Address::times_4, 0)); 7222 rorq(op1, 32); 7223 movq(sum, Address(out, offset, Address::times_4, 0)); 7224 rorq(sum, 32); 7225 if (UseBMI2Instructions) { 7226 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7227 } 7228 else { 7229 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7230 } 7231 // Store back in big endian from little endian 7232 rorq(sum, 0x20); 7233 movq(Address(out, offset, Address::times_4, 0), sum); 7234 7235 jmp(L_first_loop); 7236 bind(L_first_loop_exit); 7237 } 7238 7239 /** 7240 * Code for BigInteger::mulAdd() intrinsic 7241 * 7242 * rdi: out 7243 * rsi: in 7244 * r11: offs (out.length - offset) 7245 * rcx: len 7246 * r8: k 7247 * r12: tmp1 7248 * r13: tmp2 7249 * r14: tmp3 7250 * r15: tmp4 7251 * rbx: tmp5 7252 * Multiply the in[] by word k and add to out[], return the carry in rax 7253 */ 7254 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7255 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7256 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7257 7258 Label L_carry, L_last_in, L_done; 7259 7260 // carry = 0; 7261 // for (int j=len-1; j >= 0; j--) { 7262 // long product = (in[j] & LONG_MASK) * kLong + 7263 // (out[offs] & LONG_MASK) + carry; 7264 // out[offs--] = (int)product; 7265 // carry = product >>> 32; 7266 // } 7267 // 7268 push(tmp1); 7269 push(tmp2); 7270 push(tmp3); 7271 push(tmp4); 7272 push(tmp5); 7273 7274 Register op2 = tmp2; 7275 const Register sum = tmp3; 7276 const Register op1 = tmp4; 7277 const Register carry = tmp5; 7278 7279 if (UseBMI2Instructions) { 7280 op2 = rdxReg; 7281 movl(op2, k); 7282 } 7283 else { 7284 movl(op2, k); 7285 } 7286 7287 xorq(carry, carry); 7288 7289 //First loop 7290 7291 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7292 //The carry is in tmp5 7293 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7294 7295 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7296 decrementl(len); 7297 jccb(Assembler::negative, L_carry); 7298 decrementl(len); 7299 jccb(Assembler::negative, L_last_in); 7300 7301 movq(op1, Address(in, len, Address::times_4, 0)); 7302 rorq(op1, 32); 7303 7304 subl(offs, 2); 7305 movq(sum, Address(out, offs, Address::times_4, 0)); 7306 rorq(sum, 32); 7307 7308 if (UseBMI2Instructions) { 7309 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7310 } 7311 else { 7312 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7313 } 7314 7315 // Store back in big endian from little endian 7316 rorq(sum, 0x20); 7317 movq(Address(out, offs, Address::times_4, 0), sum); 7318 7319 testl(len, len); 7320 jccb(Assembler::zero, L_carry); 7321 7322 //Multiply the last in[] entry, if any 7323 bind(L_last_in); 7324 movl(op1, Address(in, 0)); 7325 movl(sum, Address(out, offs, Address::times_4, -4)); 7326 7327 movl(raxReg, k); 7328 mull(op1); //tmp4 * eax -> edx:eax 7329 addl(sum, carry); 7330 adcl(rdxReg, 0); 7331 addl(sum, raxReg); 7332 adcl(rdxReg, 0); 7333 movl(carry, rdxReg); 7334 7335 movl(Address(out, offs, Address::times_4, -4), sum); 7336 7337 bind(L_carry); 7338 //return tmp5/carry as carry in rax 7339 movl(rax, carry); 7340 7341 bind(L_done); 7342 pop(tmp5); 7343 pop(tmp4); 7344 pop(tmp3); 7345 pop(tmp2); 7346 pop(tmp1); 7347 } 7348 7349 /** 7350 * Emits code to update CRC-32 with a byte value according to constants in table 7351 * 7352 * @param [in,out]crc Register containing the crc. 7353 * @param [in]val Register containing the byte to fold into the CRC. 7354 * @param [in]table Register containing the table of crc constants. 7355 * 7356 * uint32_t crc; 7357 * val = crc_table[(val ^ crc) & 0xFF]; 7358 * crc = val ^ (crc >> 8); 7359 * 7360 */ 7361 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7362 xorl(val, crc); 7363 andl(val, 0xFF); 7364 shrl(crc, 8); // unsigned shift 7365 xorl(crc, Address(table, val, Address::times_4, 0)); 7366 } 7367 7368 /** 7369 * Fold 128-bit data chunk 7370 */ 7371 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7372 if (UseAVX > 0) { 7373 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7374 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7375 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7376 pxor(xcrc, xtmp); 7377 } else { 7378 movdqa(xtmp, xcrc); 7379 pclmulhdq(xtmp, xK); // [123:64] 7380 pclmulldq(xcrc, xK); // [63:0] 7381 pxor(xcrc, xtmp); 7382 movdqu(xtmp, Address(buf, offset)); 7383 pxor(xcrc, xtmp); 7384 } 7385 } 7386 7387 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7388 if (UseAVX > 0) { 7389 vpclmulhdq(xtmp, xK, xcrc); 7390 vpclmulldq(xcrc, xK, xcrc); 7391 pxor(xcrc, xbuf); 7392 pxor(xcrc, xtmp); 7393 } else { 7394 movdqa(xtmp, xcrc); 7395 pclmulhdq(xtmp, xK); 7396 pclmulldq(xcrc, xK); 7397 pxor(xcrc, xbuf); 7398 pxor(xcrc, xtmp); 7399 } 7400 } 7401 7402 /** 7403 * 8-bit folds to compute 32-bit CRC 7404 * 7405 * uint64_t xcrc; 7406 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7407 */ 7408 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7409 movdl(tmp, xcrc); 7410 andl(tmp, 0xFF); 7411 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7412 psrldq(xcrc, 1); // unsigned shift one byte 7413 pxor(xcrc, xtmp); 7414 } 7415 7416 /** 7417 * uint32_t crc; 7418 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7419 */ 7420 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7421 movl(tmp, crc); 7422 andl(tmp, 0xFF); 7423 shrl(crc, 8); 7424 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7425 } 7426 7427 /** 7428 * @param crc register containing existing CRC (32-bit) 7429 * @param buf register pointing to input byte buffer (byte*) 7430 * @param len register containing number of bytes 7431 * @param 
table register that will contain address of CRC table 7432 * @param tmp scratch register 7433 */ 7434 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7435 assert_different_registers(crc, buf, len, table, tmp, rax); 7436 7437 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7438 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7439 7440 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7441 // context for the registers used, where all instructions below are using 128-bit mode 7442 // On EVEX without VL and BW, these instructions will all be AVX. 7443 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7444 notl(crc); // ~crc 7445 cmpl(len, 16); 7446 jcc(Assembler::less, L_tail); 7447 7448 // Align buffer to 16 bytes 7449 movl(tmp, buf); 7450 andl(tmp, 0xF); 7451 jccb(Assembler::zero, L_aligned); 7452 subl(tmp, 16); 7453 addl(len, tmp); 7454 7455 align(4); 7456 BIND(L_align_loop); 7457 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7458 update_byte_crc32(crc, rax, table); 7459 increment(buf); 7460 incrementl(tmp); 7461 jccb(Assembler::less, L_align_loop); 7462 7463 BIND(L_aligned); 7464 movl(tmp, len); // save 7465 shrl(len, 4); 7466 jcc(Assembler::zero, L_tail_restore); 7467 7468 // Fold crc into first bytes of vector 7469 movdqa(xmm1, Address(buf, 0)); 7470 movdl(rax, xmm1); 7471 xorl(crc, rax); 7472 if (VM_Version::supports_sse4_1()) { 7473 pinsrd(xmm1, crc, 0); 7474 } else { 7475 pinsrw(xmm1, crc, 0); 7476 shrl(crc, 16); 7477 pinsrw(xmm1, crc, 1); 7478 } 7479 addptr(buf, 16); 7480 subl(len, 4); // len > 0 7481 jcc(Assembler::less, L_fold_tail); 7482 7483 movdqa(xmm2, Address(buf, 0)); 7484 movdqa(xmm3, Address(buf, 16)); 7485 movdqa(xmm4, Address(buf, 32)); 7486 addptr(buf, 48); 7487 subl(len, 3); 7488 jcc(Assembler::lessEqual, L_fold_512b); 7489 7490 // Fold total 512 bits of polynomial on each iteration, 7491 // 128 bits per each of 4 parallel streams. 7492 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 7493 7494 align32(); 7495 BIND(L_fold_512b_loop); 7496 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7497 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 7498 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 7499 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 7500 addptr(buf, 64); 7501 subl(len, 4); 7502 jcc(Assembler::greater, L_fold_512b_loop); 7503 7504 // Fold 512 bits to 128 bits. 7505 BIND(L_fold_512b); 7506 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7507 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 7508 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 7509 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 7510 7511 // Fold the rest of 128 bits data chunks 7512 BIND(L_fold_tail); 7513 addl(len, 3); 7514 jccb(Assembler::lessEqual, L_fold_128b); 7515 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7516 7517 BIND(L_fold_tail_loop); 7518 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7519 addptr(buf, 16); 7520 decrementl(len); 7521 jccb(Assembler::greater, L_fold_tail_loop); 7522 7523 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
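// The 128 bits folded so far are reduced with carry-less multiplies against the crc_by128
// mask constants; the byte-at-a-time table folds that follow finish the 32-bit CRC.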
7524 BIND(L_fold_128b); 7525 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 7526 if (UseAVX > 0) { 7527 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 7528 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 7529 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 7530 } else { 7531 movdqa(xmm2, xmm0); 7532 pclmulqdq(xmm2, xmm1, 0x1); 7533 movdqa(xmm3, xmm0); 7534 pand(xmm3, xmm2); 7535 pclmulqdq(xmm0, xmm3, 0x1); 7536 } 7537 psrldq(xmm1, 8); 7538 psrldq(xmm2, 4); 7539 pxor(xmm0, xmm1); 7540 pxor(xmm0, xmm2); 7541 7542 // 8 8-bit folds to compute 32-bit CRC. 7543 for (int j = 0; j < 4; j++) { 7544 fold_8bit_crc32(xmm0, table, xmm1, rax); 7545 } 7546 movdl(crc, xmm0); // mov 32 bits to general register 7547 for (int j = 0; j < 4; j++) { 7548 fold_8bit_crc32(crc, table, rax); 7549 } 7550 7551 BIND(L_tail_restore); 7552 movl(len, tmp); // restore 7553 BIND(L_tail); 7554 andl(len, 0xf); 7555 jccb(Assembler::zero, L_exit); 7556 7557 // Fold the rest of bytes 7558 align(4); 7559 BIND(L_tail_loop); 7560 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7561 update_byte_crc32(crc, rax, table); 7562 increment(buf); 7563 decrementl(len); 7564 jccb(Assembler::greater, L_tail_loop); 7565 7566 BIND(L_exit); 7567 notl(crc); // ~c 7568 } 7569 7570 // Helper function for AVX 512 CRC32 7571 // Fold 512-bit data chunks 7572 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 7573 Register pos, int offset) { 7574 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 7575 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 7576 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 7577 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 7578 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 7579 } 7580 7581 // Helper function for AVX 512 CRC32 7582 // Compute CRC32 for < 256B buffers 7583 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 7584 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 7585 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 7586 7587 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 7588 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 7589 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 7590 7591 // check if there is enough buffer to be able to fold 16B at a time 7592 cmpl(len, 32); 7593 jcc(Assembler::less, L_less_than_32); 7594 7595 // if there is, load the constants 7596 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 7597 movdl(xmm0, crc); // get the initial crc value 7598 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7599 pxor(xmm7, xmm0); 7600 7601 // update the buffer pointer 7602 addl(pos, 16); 7603 //update the counter.subtract 32 instead of 16 to save one instruction from the loop 7604 subl(len, 32); 7605 jmp(L_16B_reduction_loop); 7606 7607 bind(L_less_than_32); 7608 //mov initial crc to the return value. this is necessary for zero - length buffers. 
7609 movl(rax, crc); 7610 testl(len, len); 7611 jcc(Assembler::equal, L_cleanup); 7612 7613 movdl(xmm0, crc); //get the initial crc value 7614 7615 cmpl(len, 16); 7616 jcc(Assembler::equal, L_exact_16_left); 7617 jcc(Assembler::less, L_less_than_16_left); 7618 7619 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7620 pxor(xmm7, xmm0); //xor the initial crc value 7621 addl(pos, 16); 7622 subl(len, 16); 7623 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 7624 jmp(L_get_last_two_xmms); 7625 7626 bind(L_less_than_16_left); 7627 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 7628 pxor(xmm1, xmm1); 7629 movptr(tmp1, rsp); 7630 movdqu(Address(tmp1, 0 * 16), xmm1); 7631 7632 cmpl(len, 4); 7633 jcc(Assembler::less, L_only_less_than_4); 7634 7635 //backup the counter value 7636 movl(tmp2, len); 7637 cmpl(len, 8); 7638 jcc(Assembler::less, L_less_than_8_left); 7639 7640 //load 8 Bytes 7641 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 7642 movq(Address(tmp1, 0 * 16), rax); 7643 addptr(tmp1, 8); 7644 subl(len, 8); 7645 addl(pos, 8); 7646 7647 bind(L_less_than_8_left); 7648 cmpl(len, 4); 7649 jcc(Assembler::less, L_less_than_4_left); 7650 7651 //load 4 Bytes 7652 movl(rax, Address(buf, pos, Address::times_1, 0)); 7653 movl(Address(tmp1, 0 * 16), rax); 7654 addptr(tmp1, 4); 7655 subl(len, 4); 7656 addl(pos, 4); 7657 7658 bind(L_less_than_4_left); 7659 cmpl(len, 2); 7660 jcc(Assembler::less, L_less_than_2_left); 7661 7662 // load 2 Bytes 7663 movw(rax, Address(buf, pos, Address::times_1, 0)); 7664 movl(Address(tmp1, 0 * 16), rax); 7665 addptr(tmp1, 2); 7666 subl(len, 2); 7667 addl(pos, 2); 7668 7669 bind(L_less_than_2_left); 7670 cmpl(len, 1); 7671 jcc(Assembler::less, L_zero_left); 7672 7673 // load 1 Byte 7674 movb(rax, Address(buf, pos, Address::times_1, 0)); 7675 movb(Address(tmp1, 0 * 16), rax); 7676 7677 bind(L_zero_left); 7678 movdqu(xmm7, Address(rsp, 0)); 7679 pxor(xmm7, xmm0); //xor the initial crc value 7680 7681 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7682 movdqu(xmm0, Address(rax, tmp2)); 7683 pshufb(xmm7, xmm0); 7684 jmp(L_128_done); 7685 7686 bind(L_exact_16_left); 7687 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 7688 pxor(xmm7, xmm0); //xor the initial crc value 7689 jmp(L_128_done); 7690 7691 bind(L_only_less_than_4); 7692 cmpl(len, 3); 7693 jcc(Assembler::less, L_only_less_than_3); 7694 7695 // load 3 Bytes 7696 movb(rax, Address(buf, pos, Address::times_1, 0)); 7697 movb(Address(tmp1, 0), rax); 7698 7699 movb(rax, Address(buf, pos, Address::times_1, 1)); 7700 movb(Address(tmp1, 1), rax); 7701 7702 movb(rax, Address(buf, pos, Address::times_1, 2)); 7703 movb(Address(tmp1, 2), rax); 7704 7705 movdqu(xmm7, Address(rsp, 0)); 7706 pxor(xmm7, xmm0); //xor the initial crc value 7707 7708 pslldq(xmm7, 0x5); 7709 jmp(L_barrett); 7710 bind(L_only_less_than_3); 7711 cmpl(len, 2); 7712 jcc(Assembler::less, L_only_less_than_2); 7713 7714 // load 2 Bytes 7715 movb(rax, Address(buf, pos, Address::times_1, 0)); 7716 movb(Address(tmp1, 0), rax); 7717 7718 movb(rax, Address(buf, pos, Address::times_1, 1)); 7719 movb(Address(tmp1, 1), rax); 7720 7721 movdqu(xmm7, Address(rsp, 0)); 7722 pxor(xmm7, xmm0); //xor the initial crc value 7723 7724 pslldq(xmm7, 0x6); 7725 jmp(L_barrett); 7726 7727 bind(L_only_less_than_2); 7728 //load 1 Byte 7729 movb(rax, Address(buf, pos, Address::times_1, 0)); 7730 movb(Address(tmp1, 0), rax); 7731 7732 movdqu(xmm7, Address(rsp, 
0)); 7733 pxor(xmm7, xmm0); //xor the initial crc value 7734 7735 pslldq(xmm7, 0x7); 7736 } 7737 7738 /** 7739 * Compute CRC32 using AVX512 instructions 7740 * param crc register containing existing CRC (32-bit) 7741 * param buf register pointing to input byte buffer (byte*) 7742 * param len register containing number of bytes 7743 * param table address of crc or crc32c table 7744 * param tmp1 scratch register 7745 * param tmp2 scratch register 7746 * return rax result register 7747 * 7748 * This routine is identical for crc32c with the exception of the precomputed constant 7749 * table which will be passed as the table argument. The calculation steps are 7750 * the same for both variants. 7751 */ 7752 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 7753 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 7754 7755 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7756 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7757 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 7758 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 7759 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 7760 7761 const Register pos = r12; 7762 push(r12); 7763 subptr(rsp, 16 * 2 + 8); 7764 7765 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7766 // context for the registers used, where all instructions below are using 128-bit mode 7767 // On EVEX without VL and BW, these instructions will all be AVX. 7768 movl(pos, 0); 7769 7770 // check if smaller than 256B 7771 cmpl(len, 256); 7772 jcc(Assembler::less, L_less_than_256); 7773 7774 // load the initial crc value 7775 movdl(xmm10, crc); 7776 7777 // receive the initial 64B data, xor the initial crc value 7778 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 7779 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 7780 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 7781 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 7782 7783 subl(len, 256); 7784 cmpl(len, 256); 7785 jcc(Assembler::less, L_fold_128_B_loop); 7786 7787 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 7788 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 7789 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 7790 subl(len, 256); 7791 7792 bind(L_fold_256_B_loop); 7793 addl(pos, 256); 7794 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 7795 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 7796 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 7797 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 7798 7799 subl(len, 256); 7800 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 7801 7802 // Fold 256 into 128 7803 addl(pos, 256); 7804 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 7805 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 7806 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 7807 7808 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 7809 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 7810 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 7811 7812 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 7813 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 7814 7815 addl(len, 128); 7816 jmp(L_fold_128_B_register); 7817 7818 // at this section of the code, there is 128 * x + y(0 <= y<128) bytes of buffer.The fold_128_B_loop 7819 // loop will fold 128B at a time until we have 128 + y Bytes of buffer 7820 7821 // fold 128B at a time.This section of the code folds 8 xmm registers in parallel 7822 bind(L_fold_128_B_loop); 7823 addl(pos, 128); 7824 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 7825 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 7826 7827 subl(len, 128); 7828 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 7829 7830 addl(pos, 128); 7831 7832 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128 7833 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 7834 bind(L_fold_128_B_register); 7835 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 7836 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 7837 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 7838 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 7839 // save last that has no multiplicand 7840 vextracti64x2(xmm7, xmm4, 3); 7841 7842 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 7843 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 7844 // Needed later in reduction loop 7845 movdqu(xmm10, Address(table, 1 * 16)); 7846 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 7847 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 7848 7849 // Swap 1,0,3,2 - 01 00 11 10 7850 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 7851 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 7852 vextracti128(xmm5, xmm8, 1); 7853 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 7854 7855 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 7856 // instead of a cmp instruction, we use the negative flag with the jl instruction 7857 addl(len, 128 - 16); 7858 jcc(Assembler::less, L_final_reduction_for_128); 7859 7860 bind(L_16B_reduction_loop); 7861 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7862 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7863 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7864 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 7865 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7866 addl(pos, 16); 7867 subl(len, 16); 7868 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 7869 7870 bind(L_final_reduction_for_128); 7871 addl(len, 16); 7872 jcc(Assembler::equal, L_128_done); 7873 7874 bind(L_get_last_two_xmms); 7875 movdqu(xmm2, xmm7); 7876 addl(pos, len); 7877 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 7878 subl(pos, len); 7879 7880 // get rid of the extra data that was loaded before 7881 // load the shift constant 7882 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7883 movdqu(xmm0, Address(rax, len)); 7884 addl(rax, len); 7885 7886 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7887 //Change mask to 512 7888 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 7889 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 7890 7891 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 7892 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7893 vpclmulqdq(xmm7, 
xmm7, xmm10, 0x10); 7894 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7895 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 7896 7897 bind(L_128_done); 7898 // compute crc of a 128-bit value 7899 movdqu(xmm10, Address(table, 3 * 16)); 7900 movdqu(xmm0, xmm7); 7901 7902 // 64b fold 7903 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 7904 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 7905 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7906 7907 // 32b fold 7908 movdqu(xmm0, xmm7); 7909 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 7910 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7911 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7912 jmp(L_barrett); 7913 7914 bind(L_less_than_256); 7915 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 7916 7917 //barrett reduction 7918 bind(L_barrett); 7919 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 7920 movdqu(xmm1, xmm7); 7921 movdqu(xmm2, xmm7); 7922 movdqu(xmm10, Address(table, 4 * 16)); 7923 7924 pclmulqdq(xmm7, xmm10, 0x0); 7925 pxor(xmm7, xmm2); 7926 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 7927 movdqu(xmm2, xmm7); 7928 pclmulqdq(xmm7, xmm10, 0x10); 7929 pxor(xmm7, xmm2); 7930 pxor(xmm7, xmm1); 7931 pextrd(crc, xmm7, 2); 7932 7933 bind(L_cleanup); 7934 addptr(rsp, 16 * 2 + 8); 7935 pop(r12); 7936 } 7937 7938 // S. Gueron / Information Processing Letters 112 (2012) 184 7939 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 7940 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 7941 // Output: the 64-bit carry-less product of B * CONST 7942 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 7943 Register tmp1, Register tmp2, Register tmp3) { 7944 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 7945 if (n > 0) { 7946 addq(tmp3, n * 256 * 8); 7947 } 7948 // Q1 = TABLEExt[n][B & 0xFF]; 7949 movl(tmp1, in); 7950 andl(tmp1, 0x000000FF); 7951 shll(tmp1, 3); 7952 addq(tmp1, tmp3); 7953 movq(tmp1, Address(tmp1, 0)); 7954 7955 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 7956 movl(tmp2, in); 7957 shrl(tmp2, 8); 7958 andl(tmp2, 0x000000FF); 7959 shll(tmp2, 3); 7960 addq(tmp2, tmp3); 7961 movq(tmp2, Address(tmp2, 0)); 7962 7963 shlq(tmp2, 8); 7964 xorq(tmp1, tmp2); 7965 7966 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 7967 movl(tmp2, in); 7968 shrl(tmp2, 16); 7969 andl(tmp2, 0x000000FF); 7970 shll(tmp2, 3); 7971 addq(tmp2, tmp3); 7972 movq(tmp2, Address(tmp2, 0)); 7973 7974 shlq(tmp2, 16); 7975 xorq(tmp1, tmp2); 7976 7977 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 7978 shrl(in, 24); 7979 andl(in, 0x000000FF); 7980 shll(in, 3); 7981 addq(in, tmp3); 7982 movq(in, Address(in, 0)); 7983 7984 shlq(in, 24); 7985 xorq(in, tmp1); 7986 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 7987 } 7988 7989 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 7990 Register in_out, 7991 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 7992 XMMRegister w_xtmp2, 7993 Register tmp1, 7994 Register n_tmp2, Register n_tmp3) { 7995 if (is_pclmulqdq_supported) { 7996 movdl(w_xtmp1, in_out); // modified blindly 7997 7998 movl(tmp1, const_or_pre_comp_const_index); 7999 movdl(w_xtmp2, tmp1); 8000 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8001 8002 movdq(in_out, w_xtmp1); 8003 } else { 8004 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8005 } 8006 } 8007 8008 // 
Recombination Alternative 2: No bit-reflections 8009 // T1 = (CRC_A * U1) << 1 8010 // T2 = (CRC_B * U2) << 1 8011 // C1 = T1 >> 32 8012 // C2 = T2 >> 32 8013 // T1 = T1 & 0xFFFFFFFF 8014 // T2 = T2 & 0xFFFFFFFF 8015 // T1 = CRC32(0, T1) 8016 // T2 = CRC32(0, T2) 8017 // C1 = C1 ^ T1 8018 // C2 = C2 ^ T2 8019 // CRC = C1 ^ C2 ^ CRC_C 8020 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8021 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8022 Register tmp1, Register tmp2, 8023 Register n_tmp3) { 8024 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8025 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8026 shlq(in_out, 1); 8027 movl(tmp1, in_out); 8028 shrq(in_out, 32); 8029 xorl(tmp2, tmp2); 8030 crc32(tmp2, tmp1, 4); 8031 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8032 shlq(in1, 1); 8033 movl(tmp1, in1); 8034 shrq(in1, 32); 8035 xorl(tmp2, tmp2); 8036 crc32(tmp2, tmp1, 4); 8037 xorl(in1, tmp2); 8038 xorl(in_out, in1); 8039 xorl(in_out, in2); 8040 } 8041 8042 // Set N to predefined value 8043 // Subtract from a length of a buffer 8044 // execute in a loop: 8045 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8046 // for i = 1 to N do 8047 // CRC_A = CRC32(CRC_A, A[i]) 8048 // CRC_B = CRC32(CRC_B, B[i]) 8049 // CRC_C = CRC32(CRC_C, C[i]) 8050 // end for 8051 // Recombine 8052 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8053 Register in_out1, Register in_out2, Register in_out3, 8054 Register tmp1, Register tmp2, Register tmp3, 8055 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8056 Register tmp4, Register tmp5, 8057 Register n_tmp6) { 8058 Label L_processPartitions; 8059 Label L_processPartition; 8060 Label L_exit; 8061 8062 bind(L_processPartitions); 8063 cmpl(in_out1, 3 * size); 8064 jcc(Assembler::less, L_exit); 8065 xorl(tmp1, tmp1); 8066 xorl(tmp2, tmp2); 8067 movq(tmp3, in_out2); 8068 addq(tmp3, size); 8069 8070 bind(L_processPartition); 8071 crc32(in_out3, Address(in_out2, 0), 8); 8072 crc32(tmp1, Address(in_out2, size), 8); 8073 crc32(tmp2, Address(in_out2, size * 2), 8); 8074 addq(in_out2, 8); 8075 cmpq(in_out2, tmp3); 8076 jcc(Assembler::less, L_processPartition); 8077 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8078 w_xtmp1, w_xtmp2, w_xtmp3, 8079 tmp4, tmp5, 8080 n_tmp6); 8081 addq(in_out2, 2 * size); 8082 subl(in_out1, 3 * size); 8083 jmp(L_processPartitions); 8084 8085 bind(L_exit); 8086 } 8087 8088 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8089 // Input: A buffer I of L bytes. 8090 // Output: the CRC32C value of the buffer. 8091 // Notations: 8092 // Write L = 24N + r, with N = floor (L/24). 8093 // r = L mod 24 (0 <= r < 24). 8094 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 8095 // N quadwords, and R consists of r bytes. 
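// (For example, L = 100 gives N = floor(100/24) = 4 and r = 4, so A, B and C are
//  4 quadwords (32 bytes) each and R is the trailing 4 bytes.)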
8096 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 8097 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 8098 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 8099 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 8100 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8101 Register tmp1, Register tmp2, Register tmp3, 8102 Register tmp4, Register tmp5, Register tmp6, 8103 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8104 bool is_pclmulqdq_supported) { 8105 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8106 Label L_wordByWord; 8107 Label L_byteByByteProlog; 8108 Label L_byteByByte; 8109 Label L_exit; 8110 8111 if (is_pclmulqdq_supported ) { 8112 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr(); 8113 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1); 8114 8115 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2); 8116 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3); 8117 8118 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4); 8119 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5); 8120 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8121 } else { 8122 const_or_pre_comp_const_index[0] = 1; 8123 const_or_pre_comp_const_index[1] = 0; 8124 8125 const_or_pre_comp_const_index[2] = 3; 8126 const_or_pre_comp_const_index[3] = 2; 8127 8128 const_or_pre_comp_const_index[4] = 5; 8129 const_or_pre_comp_const_index[5] = 4; 8130 } 8131 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8132 in2, in1, in_out, 8133 tmp1, tmp2, tmp3, 8134 w_xtmp1, w_xtmp2, w_xtmp3, 8135 tmp4, tmp5, 8136 tmp6); 8137 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8138 in2, in1, in_out, 8139 tmp1, tmp2, tmp3, 8140 w_xtmp1, w_xtmp2, w_xtmp3, 8141 tmp4, tmp5, 8142 tmp6); 8143 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8144 in2, in1, in_out, 8145 tmp1, tmp2, tmp3, 8146 w_xtmp1, w_xtmp2, w_xtmp3, 8147 tmp4, tmp5, 8148 tmp6); 8149 movl(tmp1, in2); 8150 andl(tmp1, 0x00000007); 8151 negl(tmp1); 8152 addl(tmp1, in2); 8153 addq(tmp1, in1); 8154 8155 cmpq(in1, tmp1); 8156 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8157 align(16); 8158 BIND(L_wordByWord); 8159 crc32(in_out, Address(in1, 0), 8); 8160 addq(in1, 8); 8161 cmpq(in1, tmp1); 8162 jcc(Assembler::less, L_wordByWord); 8163 8164 BIND(L_byteByByteProlog); 8165 andl(in2, 0x00000007); 8166 movl(tmp2, 1); 8167 8168 cmpl(tmp2, in2); 8169 jccb(Assembler::greater, L_exit); 8170 BIND(L_byteByByte); 8171 crc32(in_out, Address(in1, 0), 1); 8172 incq(in1); 8173 incl(tmp2); 8174 cmpl(tmp2, in2); 8175 jcc(Assembler::lessEqual, L_byteByByte); 8176 8177 BIND(L_exit); 8178 } 8179 #undef BIND 8180 #undef BLOCK_COMMENT 8181 8182 // Compress char[] array to byte[]. 8183 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 8184 // Return the array length if every element in array can be encoded, 8185 // otherwise, the index of first non-latin1 (> 0xff) character. 
8186 // @IntrinsicCandidate 8187 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8188 // for (int i = 0; i < len; i++) { 8189 // char c = src[srcOff]; 8190 // if (c > 0xff) { 8191 // return i; // return index of non-latin1 char 8192 // } 8193 // dst[dstOff] = (byte)c; 8194 // srcOff++; 8195 // dstOff++; 8196 // } 8197 // return len; 8198 // } 8199 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8200 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8201 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8202 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8203 Label copy_chars_loop, done, reset_sp, copy_tail; 8204 8205 // rsi: src 8206 // rdi: dst 8207 // rdx: len 8208 // rcx: tmp5 8209 // rax: result 8210 8211 // rsi holds start addr of source char[] to be compressed 8212 // rdi holds start addr of destination byte[] 8213 // rdx holds length 8214 8215 assert(len != result, ""); 8216 8217 // save length for return 8218 movl(result, len); 8219 8220 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8221 VM_Version::supports_avx512vlbw() && 8222 VM_Version::supports_bmi2()) { 8223 8224 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 8225 8226 // alignment 8227 Label post_alignment; 8228 8229 // if length of the string is less than 32, handle it the old fashioned way 8230 testl(len, -32); 8231 jcc(Assembler::zero, below_threshold); 8232 8233 // First check whether a character is compressible ( <= 0xFF). 8234 // Create mask to test for Unicode chars inside zmm vector 8235 movl(tmp5, 0x00FF); 8236 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 8237 8238 testl(len, -64); 8239 jccb(Assembler::zero, post_alignment); 8240 8241 movl(tmp5, dst); 8242 andl(tmp5, (32 - 1)); 8243 negl(tmp5); 8244 andl(tmp5, (32 - 1)); 8245 8246 // bail out when there is nothing to be done 8247 testl(tmp5, 0xFFFFFFFF); 8248 jccb(Assembler::zero, post_alignment); 8249 8250 // ~(~0 << len), where len is the # of remaining elements to process 8251 movl(len, 0xFFFFFFFF); 8252 shlxl(len, len, tmp5); 8253 notl(len); 8254 kmovdl(mask2, len); 8255 movl(len, result); 8256 8257 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8258 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8259 ktestd(mask1, mask2); 8260 jcc(Assembler::carryClear, copy_tail); 8261 8262 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8263 8264 addptr(src, tmp5); 8265 addptr(src, tmp5); 8266 addptr(dst, tmp5); 8267 subl(len, tmp5); 8268 8269 bind(post_alignment); 8270 // end of alignment 8271 8272 movl(tmp5, len); 8273 andl(tmp5, (32 - 1)); // tail count (in chars) 8274 andl(len, ~(32 - 1)); // vector count (in chars) 8275 jccb(Assembler::zero, copy_loop_tail); 8276 8277 lea(src, Address(src, len, Address::times_2)); 8278 lea(dst, Address(dst, len, Address::times_1)); 8279 negptr(len); 8280 8281 bind(copy_32_loop); 8282 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 8283 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 8284 kortestdl(mask1, mask1); 8285 jccb(Assembler::carryClear, reset_for_copy_tail); 8286 8287 // All elements in current processed chunk are valid candidates for 8288 // compression. Write a truncated byte elements to the memory. 
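// (evpmovwb truncates each 16-bit element to its low byte, storing 32 bytes per iteration.)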
8289 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 8290 addptr(len, 32); 8291 jccb(Assembler::notZero, copy_32_loop); 8292 8293 bind(copy_loop_tail); 8294 // bail out when there is nothing to be done 8295 testl(tmp5, 0xFFFFFFFF); 8296 jcc(Assembler::zero, done); 8297 8298 movl(len, tmp5); 8299 8300 // ~(~0 << len), where len is the # of remaining elements to process 8301 movl(tmp5, 0xFFFFFFFF); 8302 shlxl(tmp5, tmp5, len); 8303 notl(tmp5); 8304 8305 kmovdl(mask2, tmp5); 8306 8307 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8308 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8309 ktestd(mask1, mask2); 8310 jcc(Assembler::carryClear, copy_tail); 8311 8312 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8313 jmp(done); 8314 8315 bind(reset_for_copy_tail); 8316 lea(src, Address(src, tmp5, Address::times_2)); 8317 lea(dst, Address(dst, tmp5, Address::times_1)); 8318 subptr(len, tmp5); 8319 jmp(copy_chars_loop); 8320 8321 bind(below_threshold); 8322 } 8323 8324 if (UseSSE42Intrinsics) { 8325 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 8326 8327 // vectored compression 8328 testl(len, 0xfffffff8); 8329 jcc(Assembler::zero, copy_tail); 8330 8331 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 8332 movdl(tmp1Reg, tmp5); 8333 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 8334 8335 andl(len, 0xfffffff0); 8336 jccb(Assembler::zero, copy_16); 8337 8338 // compress 16 chars per iter 8339 pxor(tmp4Reg, tmp4Reg); 8340 8341 lea(src, Address(src, len, Address::times_2)); 8342 lea(dst, Address(dst, len, Address::times_1)); 8343 negptr(len); 8344 8345 bind(copy_32_loop); 8346 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 8347 por(tmp4Reg, tmp2Reg); 8348 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 8349 por(tmp4Reg, tmp3Reg); 8350 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 8351 jccb(Assembler::notZero, reset_for_copy_tail); 8352 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 8353 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 8354 addptr(len, 16); 8355 jccb(Assembler::notZero, copy_32_loop); 8356 8357 // compress next vector of 8 chars (if any) 8358 bind(copy_16); 8359 // len = 0 8360 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 8361 jccb(Assembler::zero, copy_tail_sse); 8362 8363 pxor(tmp3Reg, tmp3Reg); 8364 8365 movdqu(tmp2Reg, Address(src, 0)); 8366 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 8367 jccb(Assembler::notZero, reset_for_copy_tail); 8368 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 8369 movq(Address(dst, 0), tmp2Reg); 8370 addptr(src, 16); 8371 addptr(dst, 8); 8372 jmpb(copy_tail_sse); 8373 8374 bind(reset_for_copy_tail); 8375 movl(tmp5, result); 8376 andl(tmp5, 0x0000000f); 8377 lea(src, Address(src, tmp5, Address::times_2)); 8378 lea(dst, Address(dst, tmp5, Address::times_1)); 8379 subptr(len, tmp5); 8380 jmpb(copy_chars_loop); 8381 8382 bind(copy_tail_sse); 8383 movl(len, result); 8384 andl(len, 0x00000007); // tail count (in chars) 8385 } 8386 // compress 1 char per iter 8387 bind(copy_tail); 8388 testl(len, len); 8389 jccb(Assembler::zero, done); 8390 lea(src, Address(src, len, Address::times_2)); 8391 lea(dst, Address(dst, len, Address::times_1)); 8392 negptr(len); 8393 8394 
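// Scalar tail: src and dst were advanced past the last len chars and len negated, so the
// loop below counts len up from -tail to zero, compressing one char per iteration and
// bailing out at the first char above 0xff.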
bind(copy_chars_loop); 8395 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 8396 testl(tmp5, 0xff00); // check if Unicode char 8397 jccb(Assembler::notZero, reset_sp); 8398 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 8399 increment(len); 8400 jccb(Assembler::notZero, copy_chars_loop); 8401 8402 // add len then return (len will be zero if compress succeeded, otherwise negative) 8403 bind(reset_sp); 8404 addl(result, len); 8405 8406 bind(done); 8407 } 8408 8409 // Inflate byte[] array to char[]. 8410 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 8411 // @IntrinsicCandidate 8412 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 8413 // for (int i = 0; i < len; i++) { 8414 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 8415 // } 8416 // } 8417 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 8418 XMMRegister tmp1, Register tmp2, KRegister mask) { 8419 Label copy_chars_loop, done, below_threshold, avx3_threshold; 8420 // rsi: src 8421 // rdi: dst 8422 // rdx: len 8423 // rcx: tmp2 8424 8425 // rsi holds start addr of source byte[] to be inflated 8426 // rdi holds start addr of destination char[] 8427 // rdx holds length 8428 assert_different_registers(src, dst, len, tmp2); 8429 movl(tmp2, len); 8430 if ((UseAVX > 2) && // AVX512 8431 VM_Version::supports_avx512vlbw() && 8432 VM_Version::supports_bmi2()) { 8433 8434 Label copy_32_loop, copy_tail; 8435 Register tmp3_aliased = len; 8436 8437 // if length of the string is less than 16, handle it in an old fashioned way 8438 testl(len, -16); 8439 jcc(Assembler::zero, below_threshold); 8440 8441 testl(len, -1 * AVX3Threshold); 8442 jcc(Assembler::zero, avx3_threshold); 8443 8444 // In order to use only one arithmetic operation for the main loop we use 8445 // this pre-calculation 8446 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 8447 andl(len, -32); // vector count 8448 jccb(Assembler::zero, copy_tail); 8449 8450 lea(src, Address(src, len, Address::times_1)); 8451 lea(dst, Address(dst, len, Address::times_2)); 8452 negptr(len); 8453 8454 8455 // inflate 32 chars per iter 8456 bind(copy_32_loop); 8457 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 8458 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 8459 addptr(len, 32); 8460 jcc(Assembler::notZero, copy_32_loop); 8461 8462 bind(copy_tail); 8463 // bail out when there is nothing to be done 8464 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 8465 jcc(Assembler::zero, done); 8466 8467 // ~(~0 << length), where length is the # of remaining elements to process 8468 movl(tmp3_aliased, -1); 8469 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 8470 notl(tmp3_aliased); 8471 kmovdl(mask, tmp3_aliased); 8472 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 8473 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 8474 8475 jmp(done); 8476 bind(avx3_threshold); 8477 } 8478 if (UseSSE42Intrinsics) { 8479 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 8480 8481 if (UseAVX > 1) { 8482 andl(tmp2, (16 - 1)); 8483 andl(len, -16); 8484 jccb(Assembler::zero, copy_new_tail); 8485 } else { 8486 andl(tmp2, 0x00000007); // tail count (in chars) 8487 andl(len, 0xfffffff8); // vector count (in chars) 8488 jccb(Assembler::zero, copy_tail); 8489 } 8490 8491 // vectored inflation 8492 lea(src, Address(src, len, 
Address::times_1)); 8493 lea(dst, Address(dst, len, Address::times_2)); 8494 negptr(len); 8495 8496 if (UseAVX > 1) { 8497 bind(copy_16_loop); 8498 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 8499 vmovdqu(Address(dst, len, Address::times_2), tmp1); 8500 addptr(len, 16); 8501 jcc(Assembler::notZero, copy_16_loop); 8502 8503 bind(below_threshold); 8504 bind(copy_new_tail); 8505 movl(len, tmp2); 8506 andl(tmp2, 0x00000007); 8507 andl(len, 0xFFFFFFF8); 8508 jccb(Assembler::zero, copy_tail); 8509 8510 pmovzxbw(tmp1, Address(src, 0)); 8511 movdqu(Address(dst, 0), tmp1); 8512 addptr(src, 8); 8513 addptr(dst, 2 * 8); 8514 8515 jmp(copy_tail, true); 8516 } 8517 8518 // inflate 8 chars per iter 8519 bind(copy_8_loop); 8520 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 8521 movdqu(Address(dst, len, Address::times_2), tmp1); 8522 addptr(len, 8); 8523 jcc(Assembler::notZero, copy_8_loop); 8524 8525 bind(copy_tail); 8526 movl(len, tmp2); 8527 8528 cmpl(len, 4); 8529 jccb(Assembler::less, copy_bytes); 8530 8531 movdl(tmp1, Address(src, 0)); // load 4 byte chars 8532 pmovzxbw(tmp1, tmp1); 8533 movq(Address(dst, 0), tmp1); 8534 subptr(len, 4); 8535 addptr(src, 4); 8536 addptr(dst, 8); 8537 8538 bind(copy_bytes); 8539 } else { 8540 bind(below_threshold); 8541 } 8542 8543 testl(len, len); 8544 jccb(Assembler::zero, done); 8545 lea(src, Address(src, len, Address::times_1)); 8546 lea(dst, Address(dst, len, Address::times_2)); 8547 negptr(len); 8548 8549 // inflate 1 char per iter 8550 bind(copy_chars_loop); 8551 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 8552 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 8553 increment(len); 8554 jcc(Assembler::notZero, copy_chars_loop); 8555 8556 bind(done); 8557 } 8558 8559 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 8560 switch(type) { 8561 case T_BYTE: 8562 case T_BOOLEAN: 8563 evmovdqub(dst, kmask, src, merge, vector_len); 8564 break; 8565 case T_CHAR: 8566 case T_SHORT: 8567 evmovdquw(dst, kmask, src, merge, vector_len); 8568 break; 8569 case T_INT: 8570 case T_FLOAT: 8571 evmovdqul(dst, kmask, src, merge, vector_len); 8572 break; 8573 case T_LONG: 8574 case T_DOUBLE: 8575 evmovdquq(dst, kmask, src, merge, vector_len); 8576 break; 8577 default: 8578 fatal("Unexpected type argument %s", type2name(type)); 8579 break; 8580 } 8581 } 8582 8583 8584 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 8585 switch(type) { 8586 case T_BYTE: 8587 case T_BOOLEAN: 8588 evmovdqub(dst, kmask, src, merge, vector_len); 8589 break; 8590 case T_CHAR: 8591 case T_SHORT: 8592 evmovdquw(dst, kmask, src, merge, vector_len); 8593 break; 8594 case T_INT: 8595 case T_FLOAT: 8596 evmovdqul(dst, kmask, src, merge, vector_len); 8597 break; 8598 case T_LONG: 8599 case T_DOUBLE: 8600 evmovdquq(dst, kmask, src, merge, vector_len); 8601 break; 8602 default: 8603 fatal("Unexpected type argument %s", type2name(type)); 8604 break; 8605 } 8606 } 8607 8608 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 8609 switch(type) { 8610 case T_BYTE: 8611 case T_BOOLEAN: 8612 evmovdqub(dst, kmask, src, merge, vector_len); 8613 break; 8614 case T_CHAR: 8615 case T_SHORT: 8616 evmovdquw(dst, kmask, src, merge, vector_len); 8617 break; 8618 case T_INT: 8619 case 
T_FLOAT: 8620 evmovdqul(dst, kmask, src, merge, vector_len); 8621 break; 8622 case T_LONG: 8623 case T_DOUBLE: 8624 evmovdquq(dst, kmask, src, merge, vector_len); 8625 break; 8626 default: 8627 fatal("Unexpected type argument %s", type2name(type)); 8628 break; 8629 } 8630 } 8631 8632 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 8633 switch(masklen) { 8634 case 2: 8635 knotbl(dst, src); 8636 movl(rtmp, 3); 8637 kmovbl(ktmp, rtmp); 8638 kandbl(dst, ktmp, dst); 8639 break; 8640 case 4: 8641 knotbl(dst, src); 8642 movl(rtmp, 15); 8643 kmovbl(ktmp, rtmp); 8644 kandbl(dst, ktmp, dst); 8645 break; 8646 case 8: 8647 knotbl(dst, src); 8648 break; 8649 case 16: 8650 knotwl(dst, src); 8651 break; 8652 case 32: 8653 knotdl(dst, src); 8654 break; 8655 case 64: 8656 knotql(dst, src); 8657 break; 8658 default: 8659 fatal("Unexpected vector length %d", masklen); 8660 break; 8661 } 8662 } 8663 8664 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8665 switch(type) { 8666 case T_BOOLEAN: 8667 case T_BYTE: 8668 kandbl(dst, src1, src2); 8669 break; 8670 case T_CHAR: 8671 case T_SHORT: 8672 kandwl(dst, src1, src2); 8673 break; 8674 case T_INT: 8675 case T_FLOAT: 8676 kanddl(dst, src1, src2); 8677 break; 8678 case T_LONG: 8679 case T_DOUBLE: 8680 kandql(dst, src1, src2); 8681 break; 8682 default: 8683 fatal("Unexpected type argument %s", type2name(type)); 8684 break; 8685 } 8686 } 8687 8688 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8689 switch(type) { 8690 case T_BOOLEAN: 8691 case T_BYTE: 8692 korbl(dst, src1, src2); 8693 break; 8694 case T_CHAR: 8695 case T_SHORT: 8696 korwl(dst, src1, src2); 8697 break; 8698 case T_INT: 8699 case T_FLOAT: 8700 kordl(dst, src1, src2); 8701 break; 8702 case T_LONG: 8703 case T_DOUBLE: 8704 korql(dst, src1, src2); 8705 break; 8706 default: 8707 fatal("Unexpected type argument %s", type2name(type)); 8708 break; 8709 } 8710 } 8711 8712 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8713 switch(type) { 8714 case T_BOOLEAN: 8715 case T_BYTE: 8716 kxorbl(dst, src1, src2); 8717 break; 8718 case T_CHAR: 8719 case T_SHORT: 8720 kxorwl(dst, src1, src2); 8721 break; 8722 case T_INT: 8723 case T_FLOAT: 8724 kxordl(dst, src1, src2); 8725 break; 8726 case T_LONG: 8727 case T_DOUBLE: 8728 kxorql(dst, src1, src2); 8729 break; 8730 default: 8731 fatal("Unexpected type argument %s", type2name(type)); 8732 break; 8733 } 8734 } 8735 8736 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8737 switch(type) { 8738 case T_BOOLEAN: 8739 case T_BYTE: 8740 evpermb(dst, mask, nds, src, merge, vector_len); break; 8741 case T_CHAR: 8742 case T_SHORT: 8743 evpermw(dst, mask, nds, src, merge, vector_len); break; 8744 case T_INT: 8745 case T_FLOAT: 8746 evpermd(dst, mask, nds, src, merge, vector_len); break; 8747 case T_LONG: 8748 case T_DOUBLE: 8749 evpermq(dst, mask, nds, src, merge, vector_len); break; 8750 default: 8751 fatal("Unexpected type argument %s", type2name(type)); break; 8752 } 8753 } 8754 8755 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8756 switch(type) { 8757 case T_BOOLEAN: 8758 case T_BYTE: 8759 evpermb(dst, mask, nds, src, merge, vector_len); break; 8760 case T_CHAR: 8761 case T_SHORT: 8762 evpermw(dst, mask, nds, src, 
merge, vector_len); break; 8763 case T_INT: 8764 case T_FLOAT: 8765 evpermd(dst, mask, nds, src, merge, vector_len); break; 8766 case T_LONG: 8767 case T_DOUBLE: 8768 evpermq(dst, mask, nds, src, merge, vector_len); break; 8769 default: 8770 fatal("Unexpected type argument %s", type2name(type)); break; 8771 } 8772 } 8773 8774 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8775 switch(type) { 8776 case T_BYTE: 8777 evpminub(dst, mask, nds, src, merge, vector_len); break; 8778 case T_SHORT: 8779 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8780 case T_INT: 8781 evpminud(dst, mask, nds, src, merge, vector_len); break; 8782 case T_LONG: 8783 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8784 default: 8785 fatal("Unexpected type argument %s", type2name(type)); break; 8786 } 8787 } 8788 8789 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8790 switch(type) { 8791 case T_BYTE: 8792 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8793 case T_SHORT: 8794 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8795 case T_INT: 8796 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8797 case T_LONG: 8798 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8799 default: 8800 fatal("Unexpected type argument %s", type2name(type)); break; 8801 } 8802 } 8803 8804 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8805 switch(type) { 8806 case T_BYTE: 8807 evpminub(dst, mask, nds, src, merge, vector_len); break; 8808 case T_SHORT: 8809 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8810 case T_INT: 8811 evpminud(dst, mask, nds, src, merge, vector_len); break; 8812 case T_LONG: 8813 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8814 default: 8815 fatal("Unexpected type argument %s", type2name(type)); break; 8816 } 8817 } 8818 8819 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8820 switch(type) { 8821 case T_BYTE: 8822 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8823 case T_SHORT: 8824 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8825 case T_INT: 8826 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8827 case T_LONG: 8828 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8829 default: 8830 fatal("Unexpected type argument %s", type2name(type)); break; 8831 } 8832 } 8833 8834 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8835 switch(type) { 8836 case T_BYTE: 8837 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8838 case T_SHORT: 8839 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8840 case T_INT: 8841 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8842 case T_LONG: 8843 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8844 case T_FLOAT: 8845 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8846 case T_DOUBLE: 8847 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8848 default: 8849 fatal("Unexpected type argument %s", type2name(type)); break; 8850 } 8851 } 8852 8853 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister 
mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8854 switch(type) { 8855 case T_BYTE: 8856 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8857 case T_SHORT: 8858 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 8859 case T_INT: 8860 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8861 case T_LONG: 8862 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8863 case T_FLOAT: 8864 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8865 case T_DOUBLE: 8866 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8867 default: 8868 fatal("Unexpected type argument %s", type2name(type)); break; 8869 } 8870 } 8871 8872 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8873 switch(type) { 8874 case T_BYTE: 8875 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8876 case T_SHORT: 8877 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8878 case T_INT: 8879 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8880 case T_LONG: 8881 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8882 case T_FLOAT: 8883 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8884 case T_DOUBLE: 8885 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8886 default: 8887 fatal("Unexpected type argument %s", type2name(type)); break; 8888 } 8889 } 8890 8891 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8892 switch(type) { 8893 case T_BYTE: 8894 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8895 case T_SHORT: 8896 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 8897 case T_INT: 8898 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8899 case T_LONG: 8900 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8901 case T_FLOAT: 8902 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8903 case T_DOUBLE: 8904 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8905 default: 8906 fatal("Unexpected type argument %s", type2name(type)); break; 8907 } 8908 } 8909 8910 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8911 switch(type) { 8912 case T_INT: 8913 evpxord(dst, mask, nds, src, merge, vector_len); break; 8914 case T_LONG: 8915 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8916 default: 8917 fatal("Unexpected type argument %s", type2name(type)); break; 8918 } 8919 } 8920 8921 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8922 switch(type) { 8923 case T_INT: 8924 evpxord(dst, mask, nds, src, merge, vector_len); break; 8925 case T_LONG: 8926 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8927 default: 8928 fatal("Unexpected type argument %s", type2name(type)); break; 8929 } 8930 } 8931 8932 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8933 switch(type) { 8934 case T_INT: 8935 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8936 case T_LONG: 8937 evporq(dst, mask, nds, src, merge, vector_len); break; 8938 default:
8939 fatal("Unexpected type argument %s", type2name(type)); break; 8940 } 8941 } 8942 8943 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8944 switch(type) { 8945 case T_INT: 8946 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8947 case T_LONG: 8948 evporq(dst, mask, nds, src, merge, vector_len); break; 8949 default: 8950 fatal("Unexpected type argument %s", type2name(type)); break; 8951 } 8952 } 8953 8954 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8955 switch(type) { 8956 case T_INT: 8957 evpandd(dst, mask, nds, src, merge, vector_len); break; 8958 case T_LONG: 8959 evpandq(dst, mask, nds, src, merge, vector_len); break; 8960 default: 8961 fatal("Unexpected type argument %s", type2name(type)); break; 8962 } 8963 } 8964 8965 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8966 switch(type) { 8967 case T_INT: 8968 evpandd(dst, mask, nds, src, merge, vector_len); break; 8969 case T_LONG: 8970 evpandq(dst, mask, nds, src, merge, vector_len); break; 8971 default: 8972 fatal("Unexpected type argument %s", type2name(type)); break; 8973 } 8974 } 8975 8976 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 8977 switch(masklen) { 8978 case 8: 8979 kortestbl(src1, src2); 8980 break; 8981 case 16: 8982 kortestwl(src1, src2); 8983 break; 8984 case 32: 8985 kortestdl(src1, src2); 8986 break; 8987 case 64: 8988 kortestql(src1, src2); 8989 break; 8990 default: 8991 fatal("Unexpected mask length %d", masklen); 8992 break; 8993 } 8994 } 8995 8996 8997 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 8998 switch(masklen) { 8999 case 8: 9000 ktestbl(src1, src2); 9001 break; 9002 case 16: 9003 ktestwl(src1, src2); 9004 break; 9005 case 32: 9006 ktestdl(src1, src2); 9007 break; 9008 case 64: 9009 ktestql(src1, src2); 9010 break; 9011 default: 9012 fatal("Unexpected mask length %d", masklen); 9013 break; 9014 } 9015 } 9016 9017 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9018 switch(type) { 9019 case T_INT: 9020 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9021 case T_LONG: 9022 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9023 default: 9024 fatal("Unexpected type argument %s", type2name(type)); break; 9025 break; 9026 } 9027 } 9028 9029 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9030 switch(type) { 9031 case T_INT: 9032 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9033 case T_LONG: 9034 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9035 default: 9036 fatal("Unexpected type argument %s", type2name(type)); break; 9037 } 9038 } 9039 9040 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9041 switch(type) { 9042 case T_INT: 9043 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9044 case T_LONG: 9045 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9046 default: 9047 fatal("Unexpected type argument %s", type2name(type)); break; 9048 } 9049 } 9050 9051 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, 
XMMRegister src2, bool merge, int vlen_enc) { 9052 switch(type) { 9053 case T_INT: 9054 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9055 case T_LONG: 9056 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9057 default: 9058 fatal("Unexpected type argument %s", type2name(type)); break; 9059 } 9060 } 9061 9062 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9063 assert(rscratch != noreg || always_reachable(src), "missing"); 9064 9065 if (reachable(src)) { 9066 evpandq(dst, nds, as_Address(src), vector_len); 9067 } else { 9068 lea(rscratch, src); 9069 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9070 } 9071 } 9072 9073 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 9074 assert(rscratch != noreg || always_reachable(src), "missing"); 9075 9076 if (reachable(src)) { 9077 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 9078 } else { 9079 lea(rscratch, src); 9080 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 9081 } 9082 } 9083 9084 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9085 assert(rscratch != noreg || always_reachable(src), "missing"); 9086 9087 if (reachable(src)) { 9088 evporq(dst, nds, as_Address(src), vector_len); 9089 } else { 9090 lea(rscratch, src); 9091 evporq(dst, nds, Address(rscratch, 0), vector_len); 9092 } 9093 } 9094 9095 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9096 assert(rscratch != noreg || always_reachable(src), "missing"); 9097 9098 if (reachable(src)) { 9099 vpshufb(dst, nds, as_Address(src), vector_len); 9100 } else { 9101 lea(rscratch, src); 9102 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 9103 } 9104 } 9105 9106 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9107 assert(rscratch != noreg || always_reachable(src), "missing"); 9108 9109 if (reachable(src)) { 9110 Assembler::vpor(dst, nds, as_Address(src), vector_len); 9111 } else { 9112 lea(rscratch, src); 9113 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 9114 } 9115 } 9116 9117 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9118 assert(rscratch != noreg || always_reachable(src3), "missing"); 9119 9120 if (reachable(src3)) { 9121 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9122 } else { 9123 lea(rscratch, src3); 9124 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9125 } 9126 } 9127 9128 #if COMPILER2_OR_JVMCI 9129 9130 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9131 Register length, Register temp, int vec_enc) { 9132 // Computing mask for predicated vector store. 9133 movptr(temp, -1); 9134 bzhiq(temp, temp, length); 9135 kmov(mask, temp); 9136 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9137 } 9138 9139 // Set memory operation for length "less than" 64 bytes. 
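// The masked fill helpers below build a k-register mask of 'length' low set bits with bzhi and
// perform a merge-masked vector store, so only 'length' elements are written and trailing memory
// beyond them is left untouched.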
9140 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9141 XMMRegister xmm, KRegister mask, Register length, 9142 Register temp, bool use64byteVector) { 9143 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9144 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9145 if (!use64byteVector) { 9146 fill32(dst, disp, xmm); 9147 subptr(length, 32 >> shift); 9148 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9149 } else { 9150 assert(MaxVectorSize == 64, "vector length != 64"); 9151 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9152 } 9153 } 9154 9155 9156 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9157 XMMRegister xmm, KRegister mask, Register length, 9158 Register temp) { 9159 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9160 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9161 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9162 } 9163 9164 9165 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9166 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9167 vmovdqu(dst, xmm); 9168 } 9169 9170 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9171 fill32(Address(dst, disp), xmm); 9172 } 9173 9174 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9175 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9176 if (!use64byteVector) { 9177 fill32(dst, xmm); 9178 fill32(dst.plus_disp(32), xmm); 9179 } else { 9180 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9181 } 9182 } 9183 9184 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9185 fill64(Address(dst, disp), xmm, use64byteVector); 9186 } 9187 9188 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9189 Register count, Register rtmp, XMMRegister xtmp) { 9190 Label L_exit; 9191 Label L_fill_start; 9192 Label L_fill_64_bytes; 9193 Label L_fill_96_bytes; 9194 Label L_fill_128_bytes; 9195 Label L_fill_128_bytes_loop; 9196 Label L_fill_128_loop_header; 9197 Label L_fill_128_bytes_loop_header; 9198 Label L_fill_128_bytes_loop_pre_header; 9199 Label L_fill_zmm_sequence; 9200 9201 int shift = -1; 9202 int avx3threshold = VM_Version::avx3_threshold(); 9203 switch(type) { 9204 case T_BYTE: shift = 0; 9205 break; 9206 case T_SHORT: shift = 1; 9207 break; 9208 case T_INT: shift = 2; 9209 break; 9210 /* Uncomment when LONG fill stubs are supported. 
9211 case T_LONG: shift = 3; 9212 break; 9213 */ 9214 default: 9215 fatal("Unhandled type: %s\n", type2name(type)); 9216 } 9217 9218 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9219 9220 if (MaxVectorSize == 64) { 9221 cmpq(count, avx3threshold >> shift); 9222 jcc(Assembler::greater, L_fill_zmm_sequence); 9223 } 9224 9225 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9226 9227 bind(L_fill_start); 9228 9229 cmpq(count, 32 >> shift); 9230 jccb(Assembler::greater, L_fill_64_bytes); 9231 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9232 jmp(L_exit); 9233 9234 bind(L_fill_64_bytes); 9235 cmpq(count, 64 >> shift); 9236 jccb(Assembler::greater, L_fill_96_bytes); 9237 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9238 jmp(L_exit); 9239 9240 bind(L_fill_96_bytes); 9241 cmpq(count, 96 >> shift); 9242 jccb(Assembler::greater, L_fill_128_bytes); 9243 fill64(to, 0, xtmp); 9244 subq(count, 64 >> shift); 9245 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9246 jmp(L_exit); 9247 9248 bind(L_fill_128_bytes); 9249 cmpq(count, 128 >> shift); 9250 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9251 fill64(to, 0, xtmp); 9252 fill32(to, 64, xtmp); 9253 subq(count, 96 >> shift); 9254 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9255 jmp(L_exit); 9256 9257 bind(L_fill_128_bytes_loop_pre_header); 9258 { 9259 mov(rtmp, to); 9260 andq(rtmp, 31); 9261 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9262 negq(rtmp); 9263 addq(rtmp, 32); 9264 mov64(r8, -1L); 9265 bzhiq(r8, r8, rtmp); 9266 kmovql(k2, r8); 9267 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9268 addq(to, rtmp); 9269 shrq(rtmp, shift); 9270 subq(count, rtmp); 9271 } 9272 9273 cmpq(count, 128 >> shift); 9274 jcc(Assembler::less, L_fill_start); 9275 9276 bind(L_fill_128_bytes_loop_header); 9277 subq(count, 128 >> shift); 9278 9279 align32(); 9280 bind(L_fill_128_bytes_loop); 9281 fill64(to, 0, xtmp); 9282 fill64(to, 64, xtmp); 9283 addq(to, 128); 9284 subq(count, 128 >> shift); 9285 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9286 9287 addq(count, 128 >> shift); 9288 jcc(Assembler::zero, L_exit); 9289 jmp(L_fill_start); 9290 } 9291 9292 if (MaxVectorSize == 64) { 9293 // Sequence using 64 byte ZMM register. 
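// Mirrors the 32-byte YMM sequence above: short lengths are handled with (partially) masked
// 64-byte fills, the pre-header aligns the destination to 64 bytes, and the main loop then
// fills 192 bytes per iteration.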
9294 Label L_fill_128_bytes_zmm; 9295 Label L_fill_192_bytes_zmm; 9296 Label L_fill_192_bytes_loop_zmm; 9297 Label L_fill_192_bytes_loop_header_zmm; 9298 Label L_fill_192_bytes_loop_pre_header_zmm; 9299 Label L_fill_start_zmm_sequence; 9300 9301 bind(L_fill_zmm_sequence); 9302 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9303 9304 bind(L_fill_start_zmm_sequence); 9305 cmpq(count, 64 >> shift); 9306 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9307 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9308 jmp(L_exit); 9309 9310 bind(L_fill_128_bytes_zmm); 9311 cmpq(count, 128 >> shift); 9312 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9313 fill64(to, 0, xtmp, true); 9314 subq(count, 64 >> shift); 9315 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9316 jmp(L_exit); 9317 9318 bind(L_fill_192_bytes_zmm); 9319 cmpq(count, 192 >> shift); 9320 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9321 fill64(to, 0, xtmp, true); 9322 fill64(to, 64, xtmp, true); 9323 subq(count, 128 >> shift); 9324 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9325 jmp(L_exit); 9326 9327 bind(L_fill_192_bytes_loop_pre_header_zmm); 9328 { 9329 movq(rtmp, to); 9330 andq(rtmp, 63); 9331 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9332 negq(rtmp); 9333 addq(rtmp, 64); 9334 mov64(r8, -1L); 9335 bzhiq(r8, r8, rtmp); 9336 kmovql(k2, r8); 9337 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9338 addq(to, rtmp); 9339 shrq(rtmp, shift); 9340 subq(count, rtmp); 9341 } 9342 9343 cmpq(count, 192 >> shift); 9344 jcc(Assembler::less, L_fill_start_zmm_sequence); 9345 9346 bind(L_fill_192_bytes_loop_header_zmm); 9347 subq(count, 192 >> shift); 9348 9349 align32(); 9350 bind(L_fill_192_bytes_loop_zmm); 9351 fill64(to, 0, xtmp, true); 9352 fill64(to, 64, xtmp, true); 9353 fill64(to, 128, xtmp, true); 9354 addq(to, 192); 9355 subq(count, 192 >> shift); 9356 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 9357 9358 addq(count, 192 >> shift); 9359 jcc(Assembler::zero, L_exit); 9360 jmp(L_fill_start_zmm_sequence); 9361 } 9362 bind(L_exit); 9363 } 9364 #endif //COMPILER2_OR_JVMCI 9365 9366 9367 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 9368 Label done; 9369 cvttss2sil(dst, src); 9370 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9371 cmpl(dst, 0x80000000); // float_sign_flip 9372 jccb(Assembler::notEqual, done); 9373 subptr(rsp, 8); 9374 movflt(Address(rsp, 0), src); 9375 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 9376 pop(dst); 9377 bind(done); 9378 } 9379 9380 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 9381 Label done; 9382 cvttsd2sil(dst, src); 9383 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9384 cmpl(dst, 0x80000000); // float_sign_flip 9385 jccb(Assembler::notEqual, done); 9386 subptr(rsp, 8); 9387 movdbl(Address(rsp, 0), src); 9388 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 9389 pop(dst); 9390 bind(done); 9391 } 9392 9393 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 9394 Label done; 9395 cvttss2siq(dst, src); 9396 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9397 jccb(Assembler::notEqual, done); 9398 subptr(rsp, 8); 9399 movflt(Address(rsp, 0), src); 9400 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup()))); 9401 pop(dst); 9402 
bind(done); 9403 } 9404 9405 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9406 // The following code is a line-by-line assembly translation of the rounding algorithm. 9407 // Please refer to java.lang.Math.round(float) algorithm for details. 9408 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000; 9409 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24; 9410 const int32_t FloatConsts_EXP_BIAS = 127; 9411 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF; 9412 const int32_t MINUS_32 = 0xFFFFFFE0; 9413 Label L_special_case, L_block1, L_exit; 9414 movl(rtmp, FloatConsts_EXP_BIT_MASK); 9415 movdl(dst, src); 9416 andl(dst, rtmp); 9417 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1); 9418 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS); 9419 subl(rtmp, dst); 9420 movl(rcx, rtmp); 9421 movl(dst, MINUS_32); 9422 testl(rtmp, dst); 9423 jccb(Assembler::notEqual, L_special_case); 9424 movdl(dst, src); 9425 andl(dst, FloatConsts_SIGNIF_BIT_MASK); 9426 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1); 9427 movdl(rtmp, src); 9428 testl(rtmp, rtmp); 9429 jccb(Assembler::greaterEqual, L_block1); 9430 negl(dst); 9431 bind(L_block1); 9432 sarl(dst); 9433 addl(dst, 0x1); 9434 sarl(dst, 0x1); 9435 jmp(L_exit); 9436 bind(L_special_case); 9437 convert_f2i(dst, src); 9438 bind(L_exit); 9439 } 9440 9441 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9442 // The following code is a line-by-line assembly translation of the rounding algorithm. 9443 // Please refer to java.lang.Math.round(double) algorithm for details. 9444 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L; 9445 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53; 9446 const int64_t DoubleConsts_EXP_BIAS = 1023; 9447 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL; 9448 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L; 9449 Label L_special_case, L_block1, L_exit; 9450 mov64(rtmp, DoubleConsts_EXP_BIT_MASK); 9451 movq(dst, src); 9452 andq(dst, rtmp); 9453 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1); 9454 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS); 9455 subq(rtmp, dst); 9456 movq(rcx, rtmp); 9457 mov64(dst, MINUS_64); 9458 testq(rtmp, dst); 9459 jccb(Assembler::notEqual, L_special_case); 9460 movq(dst, src); 9461 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK); 9462 andq(dst, rtmp); 9463 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1); 9464 orq(dst, rtmp); 9465 movq(rtmp, src); 9466 testq(rtmp, rtmp); 9467 jccb(Assembler::greaterEqual, L_block1); 9468 negq(dst); 9469 bind(L_block1); 9470 sarq(dst); 9471 addq(dst, 0x1); 9472 sarq(dst, 0x1); 9473 jmp(L_exit); 9474 bind(L_special_case); 9475 convert_d2l(dst, src); 9476 bind(L_exit); 9477 } 9478 9479 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) { 9480 Label done; 9481 cvttsd2siq(dst, src); 9482 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9483 jccb(Assembler::notEqual, done); 9484 subptr(rsp, 8); 9485 movdbl(Address(rsp, 0), src); 9486 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); 9487 pop(dst); 9488 bind(done); 9489 } 9490 9491 void MacroAssembler::cache_wb(Address line) 9492 { 9493 // 64 bit cpus always support clflush 9494 assert(VM_Version::supports_clflush(), "clflush should be available"); 9495 bool optimized = VM_Version::supports_clflushopt(); 9496 bool no_evict = VM_Version::supports_clwb(); 9497 9498 // prefer clwb (writeback without evict) otherwise 9499 // prefer clflushopt
(potentially parallel writeback with evict) 9500 // otherwise fallback on clflush (serial writeback with evict) 9501 9502 if (optimized) { 9503 if (no_evict) { 9504 clwb(line); 9505 } else { 9506 clflushopt(line); 9507 } 9508 } else { 9509 // no need for fence when using CLFLUSH 9510 clflush(line); 9511 } 9512 } 9513 9514 void MacroAssembler::cache_wbsync(bool is_pre) 9515 { 9516 assert(VM_Version::supports_clflush(), "clflush should be available"); 9517 bool optimized = VM_Version::supports_clflushopt(); 9518 bool no_evict = VM_Version::supports_clwb(); 9519 9520 // pick the correct implementation 9521 9522 if (!is_pre && (optimized || no_evict)) { 9523 // need an sfence for post flush when using clflushopt or clwb 9524 // otherwise no need for any synchronization 9525 9526 sfence(); 9527 } 9528 } 9529 9530 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 9531 switch (cond) { 9532 // Note some conditions are synonyms for others 9533 case Assembler::zero: return Assembler::notZero; 9534 case Assembler::notZero: return Assembler::zero; 9535 case Assembler::less: return Assembler::greaterEqual; 9536 case Assembler::lessEqual: return Assembler::greater; 9537 case Assembler::greater: return Assembler::lessEqual; 9538 case Assembler::greaterEqual: return Assembler::less; 9539 case Assembler::below: return Assembler::aboveEqual; 9540 case Assembler::belowEqual: return Assembler::above; 9541 case Assembler::above: return Assembler::belowEqual; 9542 case Assembler::aboveEqual: return Assembler::below; 9543 case Assembler::overflow: return Assembler::noOverflow; 9544 case Assembler::noOverflow: return Assembler::overflow; 9545 case Assembler::negative: return Assembler::positive; 9546 case Assembler::positive: return Assembler::negative; 9547 case Assembler::parity: return Assembler::noParity; 9548 case Assembler::noParity: return Assembler::parity; 9549 } 9550 ShouldNotReachHere(); return Assembler::overflow; 9551 } 9552 9553 // This is simply a call to Thread::current() 9554 void MacroAssembler::get_thread_slow(Register thread) { 9555 if (thread != rax) { 9556 push(rax); 9557 } 9558 push(rdi); 9559 push(rsi); 9560 push(rdx); 9561 push(rcx); 9562 push(r8); 9563 push(r9); 9564 push(r10); 9565 push(r11); 9566 9567 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 9568 9569 pop(r11); 9570 pop(r10); 9571 pop(r9); 9572 pop(r8); 9573 pop(rcx); 9574 pop(rdx); 9575 pop(rsi); 9576 pop(rdi); 9577 if (thread != rax) { 9578 mov(thread, rax); 9579 pop(rax); 9580 } 9581 } 9582 9583 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) { 9584 Label L_stack_ok; 9585 if (bias == 0) { 9586 testptr(sp, 2 * wordSize - 1); 9587 } else { 9588 // lea(tmp, Address(rsp, bias)); 9589 mov(tmp, sp); 9590 addptr(tmp, bias); 9591 testptr(tmp, 2 * wordSize - 1); 9592 } 9593 jcc(Assembler::equal, L_stack_ok); 9594 block_comment(msg); 9595 stop(msg); 9596 bind(L_stack_ok); 9597 } 9598 9599 // Implements lightweight-locking.
9600 // 9601 // obj: the object to be locked 9602 // reg_rax: rax 9603 // thread: the thread which attempts to lock obj 9604 // tmp: a temporary register 9605 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) { 9606 Register thread = r15_thread; 9607 9608 assert(reg_rax == rax, ""); 9609 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); 9610 9611 Label push; 9612 const Register top = tmp; 9613 9614 // Preload the markWord. It is important that this is the first 9615 // instruction emitted as it is part of C1's null check semantics. 9616 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 9617 9618 if (UseObjectMonitorTable) { 9619 // Clear cache in case fast locking succeeds or we need to take the slow-path. 9620 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); 9621 } 9622 9623 if (DiagnoseSyncOnValueBasedClasses != 0) { 9624 load_klass(tmp, obj, rscratch1); 9625 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class); 9626 jcc(Assembler::notZero, slow); 9627 } 9628 9629 // Load top. 9630 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9631 9632 // Check if the lock-stack is full. 9633 cmpl(top, LockStack::end_offset()); 9634 jcc(Assembler::greaterEqual, slow); 9635 9636 // Check for recursion. 9637 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9638 jcc(Assembler::equal, push); 9639 9640 // Check header for monitor (0b10). 9641 testptr(reg_rax, markWord::monitor_value); 9642 jcc(Assembler::notZero, slow); 9643 9644 // Try to lock. Transition lock bits 0b01 => 0b00 9645 movptr(tmp, reg_rax); 9646 andptr(tmp, ~(int32_t)markWord::unlocked_value); 9647 orptr(reg_rax, markWord::unlocked_value); 9648 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9649 jcc(Assembler::notEqual, slow); 9650 9651 // Restore top, CAS clobbers register. 9652 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9653 9654 bind(push); 9655 // After successful lock, push object on lock-stack. 9656 movptr(Address(thread, top), obj); 9657 incrementl(top, oopSize); 9658 movl(Address(thread, JavaThread::lock_stack_top_offset()), top); 9659 } 9660 9661 // Implements lightweight-unlocking. 9662 // 9663 // obj: the object to be unlocked 9664 // reg_rax: rax 9665 // thread: the thread 9666 // tmp: a temporary register 9667 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) { 9668 Register thread = r15_thread; 9669 9670 assert(reg_rax == rax, ""); 9671 assert_different_registers(obj, reg_rax, thread, tmp); 9672 9673 Label unlocked, push_and_slow; 9674 const Register top = tmp; 9675 9676 // Check if obj is top of lock-stack. 9677 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9678 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9679 jcc(Assembler::notEqual, slow); 9680 9681 // Pop lock-stack. 9682 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);) 9683 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9684 9685 // Check if recursive. 9686 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize)); 9687 jcc(Assembler::equal, unlocked); 9688 9689 // Not recursive. Check header for monitor (0b10). 
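// If the mark word now has the monitor bit set, the lock has been inflated to a full monitor;
// restore the popped lock-stack entry and defer the unlock to the runtime.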
9690 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 9691 testptr(reg_rax, markWord::monitor_value); 9692 jcc(Assembler::notZero, push_and_slow); 9693 9694 #ifdef ASSERT 9695 // Check header not unlocked (0b01). 9696 Label not_unlocked; 9697 testptr(reg_rax, markWord::unlocked_value); 9698 jcc(Assembler::zero, not_unlocked); 9699 stop("lightweight_unlock already unlocked"); 9700 bind(not_unlocked); 9701 #endif 9702 9703 // Try to unlock. Transition lock bits 0b00 => 0b01 9704 movptr(tmp, reg_rax); 9705 orptr(tmp, markWord::unlocked_value); 9706 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9707 jcc(Assembler::equal, unlocked); 9708 9709 bind(push_and_slow); 9710 // Restore lock-stack and handle the unlock in runtime. 9711 #ifdef ASSERT 9712 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9713 movptr(Address(thread, top), obj); 9714 #endif 9715 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9716 jmp(slow); 9717 9718 bind(unlocked); 9719 } 9720 9721 // Saves legacy GPRs state on stack; the 11 * wordSize slot corresponds to rsp and is intentionally left unused. 9722 void MacroAssembler::save_legacy_gprs() { 9723 subq(rsp, 16 * wordSize); 9724 movq(Address(rsp, 15 * wordSize), rax); 9725 movq(Address(rsp, 14 * wordSize), rcx); 9726 movq(Address(rsp, 13 * wordSize), rdx); 9727 movq(Address(rsp, 12 * wordSize), rbx); 9728 movq(Address(rsp, 10 * wordSize), rbp); 9729 movq(Address(rsp, 9 * wordSize), rsi); 9730 movq(Address(rsp, 8 * wordSize), rdi); 9731 movq(Address(rsp, 7 * wordSize), r8); 9732 movq(Address(rsp, 6 * wordSize), r9); 9733 movq(Address(rsp, 5 * wordSize), r10); 9734 movq(Address(rsp, 4 * wordSize), r11); 9735 movq(Address(rsp, 3 * wordSize), r12); 9736 movq(Address(rsp, 2 * wordSize), r13); 9737 movq(Address(rsp, wordSize), r14); 9738 movq(Address(rsp, 0), r15); 9739 } 9740 9741 // Restores legacy GPRs state from stack. 9742 void MacroAssembler::restore_legacy_gprs() { 9743 movq(r15, Address(rsp, 0)); 9744 movq(r14, Address(rsp, wordSize)); 9745 movq(r13, Address(rsp, 2 * wordSize)); 9746 movq(r12, Address(rsp, 3 * wordSize)); 9747 movq(r11, Address(rsp, 4 * wordSize)); 9748 movq(r10, Address(rsp, 5 * wordSize)); 9749 movq(r9, Address(rsp, 6 * wordSize)); 9750 movq(r8, Address(rsp, 7 * wordSize)); 9751 movq(rdi, Address(rsp, 8 * wordSize)); 9752 movq(rsi, Address(rsp, 9 * wordSize)); 9753 movq(rbp, Address(rsp, 10 * wordSize)); 9754 movq(rbx, Address(rsp, 12 * wordSize)); 9755 movq(rdx, Address(rsp, 13 * wordSize)); 9756 movq(rcx, Address(rsp, 14 * wordSize)); 9757 movq(rax, Address(rsp, 15 * wordSize)); 9758 addq(rsp, 16 * wordSize); 9759 } 9760 9761 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) { 9762 if (VM_Version::supports_apx_f()) { 9763 esetzucc(comparison, dst); 9764 } else { 9765 setb(comparison, dst); 9766 movzbl(dst, dst); 9767 } 9768 }