/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below        = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear   = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal        = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual     = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};


// Implementation of MacroAssembler

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());

}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
162 // 163 // normal case special case 164 // 165 // input : rax: dividend min_long 166 // reg: divisor (may not be eax/edx) -1 167 // 168 // output: rax: quotient (= rax idiv reg) min_long 169 // rdx: remainder (= rax irem reg) 0 170 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); 171 static const int64_t min_long = 0x8000000000000000; 172 Label normal_case, special_case; 173 174 // check for special case 175 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/); 176 jcc(Assembler::notEqual, normal_case); 177 xorl(rdx, rdx); // prepare rdx for possible special case (where 178 // remainder = 0) 179 cmpq(reg, -1); 180 jcc(Assembler::equal, special_case); 181 182 // handle normal case 183 bind(normal_case); 184 cdqq(); 185 int idivq_offset = offset(); 186 idivq(reg); 187 188 // normal and special case exit 189 bind(special_case); 190 191 return idivq_offset; 192 } 193 194 void MacroAssembler::decrementq(Register reg, int value) { 195 if (value == min_jint) { subq(reg, value); return; } 196 if (value < 0) { incrementq(reg, -value); return; } 197 if (value == 0) { ; return; } 198 if (value == 1 && UseIncDec) { decq(reg) ; return; } 199 /* else */ { subq(reg, value) ; return; } 200 } 201 202 void MacroAssembler::decrementq(Address dst, int value) { 203 if (value == min_jint) { subq(dst, value); return; } 204 if (value < 0) { incrementq(dst, -value); return; } 205 if (value == 0) { ; return; } 206 if (value == 1 && UseIncDec) { decq(dst) ; return; } 207 /* else */ { subq(dst, value) ; return; } 208 } 209 210 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) { 211 assert(rscratch != noreg || always_reachable(dst), "missing"); 212 213 if (reachable(dst)) { 214 incrementq(as_Address(dst)); 215 } else { 216 lea(rscratch, dst); 217 incrementq(Address(rscratch, 0)); 218 } 219 } 220 221 void MacroAssembler::incrementq(Register reg, int value) { 222 if (value == min_jint) { addq(reg, value); return; } 223 if (value < 0) { decrementq(reg, -value); return; } 224 if (value == 0) { ; return; } 225 if (value == 1 && UseIncDec) { incq(reg) ; return; } 226 /* else */ { addq(reg, value) ; return; } 227 } 228 229 void MacroAssembler::incrementq(Address dst, int value) { 230 if (value == min_jint) { addq(dst, value); return; } 231 if (value < 0) { decrementq(dst, -value); return; } 232 if (value == 0) { ; return; } 233 if (value == 1 && UseIncDec) { incq(dst) ; return; } 234 /* else */ { addq(dst, value) ; return; } 235 } 236 237 // 32bit can do a case table jump in one instruction but we no longer allow the base 238 // to be installed in the Address class 239 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 240 lea(rscratch, entry.base()); 241 Address dispatch = entry.index(); 242 assert(dispatch._base == noreg, "must be"); 243 dispatch._base = rscratch; 244 jmp(dispatch); 245 } 246 247 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 248 ShouldNotReachHere(); // 64bit doesn't use two regs 249 cmpq(x_lo, y_lo); 250 } 251 252 void MacroAssembler::lea(Register dst, AddressLiteral src) { 253 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 254 } 255 256 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 257 lea(rscratch, adr); 258 movptr(dst, rscratch); 259 } 260 261 void MacroAssembler::leave() { 262 // %%% is this really better? Why not on 32bit too? 
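  // 0xC9 is the one-byte LEAVE opcode: on x86-64 it behaves like
  // "movq rsp, rbp; popq rbp", i.e. it tears down the frame set up by enter().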
263 emit_int8((unsigned char)0xC9); // LEAVE 264 } 265 266 void MacroAssembler::lneg(Register hi, Register lo) { 267 ShouldNotReachHere(); // 64bit doesn't use two regs 268 negq(lo); 269 } 270 271 void MacroAssembler::movoop(Register dst, jobject obj) { 272 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 273 } 274 275 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 276 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 277 movq(dst, rscratch); 278 } 279 280 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 281 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 282 } 283 284 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 285 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 286 movq(dst, rscratch); 287 } 288 289 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 290 if (src.is_lval()) { 291 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 292 } else { 293 if (reachable(src)) { 294 movq(dst, as_Address(src)); 295 } else { 296 lea(dst, src); 297 movq(dst, Address(dst, 0)); 298 } 299 } 300 } 301 302 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 303 movq(as_Address(dst, rscratch), src); 304 } 305 306 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 307 movq(dst, as_Address(src, dst /*rscratch*/)); 308 } 309 310 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 311 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 312 if (is_simm32(src)) { 313 movptr(dst, checked_cast<int32_t>(src)); 314 } else { 315 mov64(rscratch, src); 316 movq(dst, rscratch); 317 } 318 } 319 320 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 321 movoop(rscratch, obj); 322 push(rscratch); 323 } 324 325 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 326 mov_metadata(rscratch, obj); 327 push(rscratch); 328 } 329 330 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 331 lea(rscratch, src); 332 if (src.is_lval()) { 333 push(rscratch); 334 } else { 335 pushq(Address(rscratch, 0)); 336 } 337 } 338 339 static void pass_arg0(MacroAssembler* masm, Register arg) { 340 if (c_rarg0 != arg ) { 341 masm->mov(c_rarg0, arg); 342 } 343 } 344 345 static void pass_arg1(MacroAssembler* masm, Register arg) { 346 if (c_rarg1 != arg ) { 347 masm->mov(c_rarg1, arg); 348 } 349 } 350 351 static void pass_arg2(MacroAssembler* masm, Register arg) { 352 if (c_rarg2 != arg ) { 353 masm->mov(c_rarg2, arg); 354 } 355 } 356 357 static void pass_arg3(MacroAssembler* masm, Register arg) { 358 if (c_rarg3 != arg ) { 359 masm->mov(c_rarg3, arg); 360 } 361 } 362 363 void MacroAssembler::stop(const char* msg) { 364 if (ShowMessageBoxOnError) { 365 address rip = pc(); 366 pusha(); // get regs on stack 367 lea(c_rarg1, InternalAddress(rip)); 368 movq(c_rarg2, rsp); // pass pointer to regs array 369 } 370 // Skip AOT caching C strings in scratch buffer. 371 const char* str = (code_section()->scratch_emit()) ? 
                    msg : AOTCodeCache::add_C_string(msg);
  lea(c_rarg0, ExternalAddress((address) str));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

#ifdef _WIN64
  // Windows always allocates space for its register args
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif
  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
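  // The first loop below annotates the top eight stack words individually via
  // os::print_location(); the second dumps 25 further rows of four raw words each.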
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
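  // Four cases below: xmm -> xmm, xmm -> outgoing stack slot,
  // incoming stack slot -> xmm, and incoming stack slot -> outgoing stack slot
  // (the last one is bounced through the integer tmp register).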

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg to int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we store integer-like items to the stack as 64-bit items
// (x86_32/64 abi) even though java would only store 32 bits for a parameter.
// On 32 bit it would simply be 32 bits, so this routine does 32->32 on 32 bit
// and 32->64 on 64 bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
611 // __ movslq(dst.first()->as_Register(), src.first()->as_Register()); 612 if (dst.first() != src.first()) { 613 movq(dst.first()->as_Register(), src.first()->as_Register()); 614 } 615 } 616 } 617 618 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) { 619 if (src.first()->is_stack()) { 620 if (dst.first()->is_stack()) { 621 // stack to stack 622 movq(rax, Address(rbp, reg2offset_in(src.first()))); 623 movq(Address(rsp, reg2offset_out(dst.first())), rax); 624 } else { 625 // stack to reg 626 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); 627 } 628 } else if (dst.first()->is_stack()) { 629 // reg to stack 630 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); 631 } else { 632 if (dst.first() != src.first()) { 633 movq(dst.first()->as_Register(), src.first()->as_Register()); 634 } 635 } 636 } 637 638 // An oop arg. Must pass a handle not the oop itself 639 void MacroAssembler::object_move(OopMap* map, 640 int oop_handle_offset, 641 int framesize_in_slots, 642 VMRegPair src, 643 VMRegPair dst, 644 bool is_receiver, 645 int* receiver_offset) { 646 647 // must pass a handle. First figure out the location we use as a handle 648 649 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register(); 650 651 // See if oop is null if it is we need no handle 652 653 if (src.first()->is_stack()) { 654 655 // Oop is already on the stack as an argument 656 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 657 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 658 if (is_receiver) { 659 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 660 } 661 662 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD); 663 lea(rHandle, Address(rbp, reg2offset_in(src.first()))); 664 // conditionally move a null 665 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); 666 } else { 667 668 // Oop is in a register we must store it to the space we reserve 669 // on the stack for oop_handles and pass a handle if oop is non-null 670 671 const Register rOop = src.first()->as_Register(); 672 int oop_slot; 673 if (rOop == j_rarg0) 674 oop_slot = 0; 675 else if (rOop == j_rarg1) 676 oop_slot = 1; 677 else if (rOop == j_rarg2) 678 oop_slot = 2; 679 else if (rOop == j_rarg3) 680 oop_slot = 3; 681 else if (rOop == j_rarg4) 682 oop_slot = 4; 683 else { 684 assert(rOop == j_rarg5, "wrong register"); 685 oop_slot = 5; 686 } 687 688 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 689 int offset = oop_slot*VMRegImpl::stack_slot_size; 690 691 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 692 // Store oop in handle area, may be null 693 movptr(Address(rsp, offset), rOop); 694 if (is_receiver) { 695 *receiver_offset = offset; 696 } 697 698 cmpptr(rOop, NULL_WORD); 699 lea(rHandle, Address(rsp, offset)); 700 // conditionally move a null from the handle area where it was just stored 701 cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); 702 } 703 704 // If arg is on the stack then place it otherwise it is already in correct reg. 
705 if (dst.first()->is_stack()) { 706 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); 707 } 708 } 709 710 void MacroAssembler::addptr(Register dst, int32_t imm32) { 711 addq(dst, imm32); 712 } 713 714 void MacroAssembler::addptr(Register dst, Register src) { 715 addq(dst, src); 716 } 717 718 void MacroAssembler::addptr(Address dst, Register src) { 719 addq(dst, src); 720 } 721 722 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 723 assert(rscratch != noreg || always_reachable(src), "missing"); 724 725 if (reachable(src)) { 726 Assembler::addsd(dst, as_Address(src)); 727 } else { 728 lea(rscratch, src); 729 Assembler::addsd(dst, Address(rscratch, 0)); 730 } 731 } 732 733 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) { 734 assert(rscratch != noreg || always_reachable(src), "missing"); 735 736 if (reachable(src)) { 737 addss(dst, as_Address(src)); 738 } else { 739 lea(rscratch, src); 740 addss(dst, Address(rscratch, 0)); 741 } 742 } 743 744 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 745 assert(rscratch != noreg || always_reachable(src), "missing"); 746 747 if (reachable(src)) { 748 Assembler::addpd(dst, as_Address(src)); 749 } else { 750 lea(rscratch, src); 751 Assembler::addpd(dst, Address(rscratch, 0)); 752 } 753 } 754 755 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only. 756 // Stub code is generated once and never copied. 757 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes. 758 void MacroAssembler::align64() { 759 align(64, (uint)(uintptr_t)pc()); 760 } 761 762 void MacroAssembler::align32() { 763 align(32, (uint)(uintptr_t)pc()); 764 } 765 766 void MacroAssembler::align(uint modulus) { 767 // 8273459: Ensure alignment is possible with current segment alignment 768 assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment"); 769 align(modulus, offset()); 770 } 771 772 void MacroAssembler::align(uint modulus, uint target) { 773 if (target % modulus != 0) { 774 nop(modulus - (target % modulus)); 775 } 776 } 777 778 void MacroAssembler::push_f(XMMRegister r) { 779 subptr(rsp, wordSize); 780 movflt(Address(rsp, 0), r); 781 } 782 783 void MacroAssembler::pop_f(XMMRegister r) { 784 movflt(r, Address(rsp, 0)); 785 addptr(rsp, wordSize); 786 } 787 788 void MacroAssembler::push_d(XMMRegister r) { 789 subptr(rsp, 2 * wordSize); 790 movdbl(Address(rsp, 0), r); 791 } 792 793 void MacroAssembler::pop_d(XMMRegister r) { 794 movdbl(r, Address(rsp, 0)); 795 addptr(rsp, 2 * Interpreter::stackElementSize); 796 } 797 798 void MacroAssembler::push_ppx(Register src) { 799 if (VM_Version::supports_apx_f()) { 800 pushp(src); 801 } else { 802 Assembler::push(src); 803 } 804 } 805 806 void MacroAssembler::pop_ppx(Register dst) { 807 if (VM_Version::supports_apx_f()) { 808 popp(dst); 809 } else { 810 Assembler::pop(dst); 811 } 812 } 813 814 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 815 // Used in sign-masking with aligned address. 
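  // For example, a double absolute value can be produced by and-ing with a
  // 16-byte-aligned constant whose quadwords are 0x7FFFFFFFFFFFFFFF, which
  // clears just the sign bits.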
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 2 &&
      (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
      (dst->encoding() >= 16)) {
    vpand(dst, dst, src, AVX_512bit, rscratch);
  } else if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  andq(dst, imm32);
}

void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
914 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size ); 915 } 916 } 917 918 void MacroAssembler::reserved_stack_check() { 919 // testing if reserved zone needs to be enabled 920 Label no_reserved_zone_enabling; 921 922 cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset())); 923 jcc(Assembler::below, no_reserved_zone_enabling); 924 925 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread); 926 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); 927 should_not_reach_here(); 928 929 bind(no_reserved_zone_enabling); 930 } 931 932 void MacroAssembler::c2bool(Register x) { 933 // implements x == 0 ? 0 : 1 934 // note: must only look at least-significant byte of x 935 // since C-style booleans are stored in one byte 936 // only! (was bug) 937 andl(x, 0xFF); 938 setb(Assembler::notZero, x); 939 } 940 941 // Wouldn't need if AddressLiteral version had new name 942 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 943 Assembler::call(L, rtype); 944 } 945 946 void MacroAssembler::call(Register entry) { 947 Assembler::call(entry); 948 } 949 950 void MacroAssembler::call(AddressLiteral entry, Register rscratch) { 951 assert(rscratch != noreg || always_reachable(entry), "missing"); 952 953 if (reachable(entry)) { 954 Assembler::call_literal(entry.target(), entry.rspec()); 955 } else { 956 lea(rscratch, entry); 957 Assembler::call(rscratch); 958 } 959 } 960 961 void MacroAssembler::ic_call(address entry, jint method_index) { 962 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 963 // Needs full 64-bit immediate for later patching. 964 mov64(rax, (int64_t)Universe::non_oop_word()); 965 call(AddressLiteral(entry, rh)); 966 } 967 968 int MacroAssembler::ic_check_size() { 969 return UseCompactObjectHeaders ? 17 : 14; 970 } 971 972 int MacroAssembler::ic_check(int end_alignment) { 973 Register receiver = j_rarg0; 974 Register data = rax; 975 Register temp = rscratch1; 976 977 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed 978 // before the inline cache check, so we don't have to execute any nop instructions when dispatching 979 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align 980 // before the inline cache check here, and not after 981 align(end_alignment, offset() + ic_check_size()); 982 983 int uep_offset = offset(); 984 985 if (UseCompactObjectHeaders) { 986 load_narrow_klass_compact(temp, receiver); 987 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); 988 } else if (UseCompressedClassPointers) { 989 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 990 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); 991 } else { 992 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 993 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset())); 994 } 995 996 // if inline cache check fails, then jump to runtime routine 997 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 998 assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment); 999 1000 return uep_offset; 1001 } 1002 1003 void MacroAssembler::emit_static_call_stub() { 1004 // Static stub relocation also tags the Method* in the code-stream. 1005 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time. 
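  // The stub body is just "movabs rbx, <Method*>" followed by the jump below;
  // both the constant and the jump target are patched once the call is resolved.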
1006 // This is recognized as unresolved by relocs/nativeinst/ic code. 1007 jump(RuntimeAddress(pc())); 1008 } 1009 1010 // Implementation of call_VM versions 1011 1012 void MacroAssembler::call_VM(Register oop_result, 1013 address entry_point, 1014 bool check_exceptions) { 1015 Label C, E; 1016 call(C, relocInfo::none); 1017 jmp(E); 1018 1019 bind(C); 1020 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 1021 ret(0); 1022 1023 bind(E); 1024 } 1025 1026 void MacroAssembler::call_VM(Register oop_result, 1027 address entry_point, 1028 Register arg_1, 1029 bool check_exceptions) { 1030 Label C, E; 1031 call(C, relocInfo::none); 1032 jmp(E); 1033 1034 bind(C); 1035 pass_arg1(this, arg_1); 1036 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 1037 ret(0); 1038 1039 bind(E); 1040 } 1041 1042 void MacroAssembler::call_VM(Register oop_result, 1043 address entry_point, 1044 Register arg_1, 1045 Register arg_2, 1046 bool check_exceptions) { 1047 Label C, E; 1048 call(C, relocInfo::none); 1049 jmp(E); 1050 1051 bind(C); 1052 1053 assert_different_registers(arg_1, c_rarg2); 1054 1055 pass_arg2(this, arg_2); 1056 pass_arg1(this, arg_1); 1057 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 1058 ret(0); 1059 1060 bind(E); 1061 } 1062 1063 void MacroAssembler::call_VM(Register oop_result, 1064 address entry_point, 1065 Register arg_1, 1066 Register arg_2, 1067 Register arg_3, 1068 bool check_exceptions) { 1069 Label C, E; 1070 call(C, relocInfo::none); 1071 jmp(E); 1072 1073 bind(C); 1074 1075 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1076 assert_different_registers(arg_2, c_rarg3); 1077 pass_arg3(this, arg_3); 1078 pass_arg2(this, arg_2); 1079 pass_arg1(this, arg_1); 1080 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 1081 ret(0); 1082 1083 bind(E); 1084 } 1085 1086 void MacroAssembler::call_VM(Register oop_result, 1087 Register last_java_sp, 1088 address entry_point, 1089 int number_of_arguments, 1090 bool check_exceptions) { 1091 call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1092 } 1093 1094 void MacroAssembler::call_VM(Register oop_result, 1095 Register last_java_sp, 1096 address entry_point, 1097 Register arg_1, 1098 bool check_exceptions) { 1099 pass_arg1(this, arg_1); 1100 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1101 } 1102 1103 void MacroAssembler::call_VM(Register oop_result, 1104 Register last_java_sp, 1105 address entry_point, 1106 Register arg_1, 1107 Register arg_2, 1108 bool check_exceptions) { 1109 1110 assert_different_registers(arg_1, c_rarg2); 1111 pass_arg2(this, arg_2); 1112 pass_arg1(this, arg_1); 1113 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1114 } 1115 1116 void MacroAssembler::call_VM(Register oop_result, 1117 Register last_java_sp, 1118 address entry_point, 1119 Register arg_1, 1120 Register arg_2, 1121 Register arg_3, 1122 bool check_exceptions) { 1123 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1124 assert_different_registers(arg_2, c_rarg3); 1125 pass_arg3(this, arg_3); 1126 pass_arg2(this, arg_2); 1127 pass_arg1(this, arg_1); 1128 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1129 } 1130 1131 void MacroAssembler::super_call_VM(Register oop_result, 1132 Register last_java_sp, 1133 address entry_point, 1134 int number_of_arguments, 1135 bool check_exceptions) { 1136 MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1137 } 
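// Illustrative use of the call_VM helpers above (a sketch, not code from this file):
// an interpreter or stub generator typically writes something like
//   __ call_VM(rax,
//              CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
//              c_rarg1, c_rarg2);
// which moves the arguments into the C calling convention registers, records the
// last Java frame, makes the call, checks for pending exceptions, and leaves the
// returned oop (if any) in rax.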
1138 1139 void MacroAssembler::super_call_VM(Register oop_result, 1140 Register last_java_sp, 1141 address entry_point, 1142 Register arg_1, 1143 bool check_exceptions) { 1144 pass_arg1(this, arg_1); 1145 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1146 } 1147 1148 void MacroAssembler::super_call_VM(Register oop_result, 1149 Register last_java_sp, 1150 address entry_point, 1151 Register arg_1, 1152 Register arg_2, 1153 bool check_exceptions) { 1154 1155 assert_different_registers(arg_1, c_rarg2); 1156 pass_arg2(this, arg_2); 1157 pass_arg1(this, arg_1); 1158 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1159 } 1160 1161 void MacroAssembler::super_call_VM(Register oop_result, 1162 Register last_java_sp, 1163 address entry_point, 1164 Register arg_1, 1165 Register arg_2, 1166 Register arg_3, 1167 bool check_exceptions) { 1168 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1169 assert_different_registers(arg_2, c_rarg3); 1170 pass_arg3(this, arg_3); 1171 pass_arg2(this, arg_2); 1172 pass_arg1(this, arg_1); 1173 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1174 } 1175 1176 void MacroAssembler::call_VM_base(Register oop_result, 1177 Register last_java_sp, 1178 address entry_point, 1179 int number_of_arguments, 1180 bool check_exceptions) { 1181 Register java_thread = r15_thread; 1182 1183 // determine last_java_sp register 1184 if (!last_java_sp->is_valid()) { 1185 last_java_sp = rsp; 1186 } 1187 // debugging support 1188 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 1189 #ifdef ASSERT 1190 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 1191 // r12 is the heapbase. 1192 if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?"); 1193 #endif // ASSERT 1194 1195 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 1196 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 1197 1198 // push java thread (becomes first argument of C function) 1199 1200 mov(c_rarg0, r15_thread); 1201 1202 // set last Java frame before call 1203 assert(last_java_sp != rbp, "can't use ebp/rbp"); 1204 1205 // Only interpreter should have to set fp 1206 set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1); 1207 1208 // do the call, remove parameters 1209 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 1210 1211 #ifdef ASSERT 1212 // Check that thread register is not clobbered. 1213 guarantee(java_thread != rax, "change this code"); 1214 push(rax); 1215 { Label L; 1216 get_thread_slow(rax); 1217 cmpptr(java_thread, rax); 1218 jcc(Assembler::equal, L); 1219 STOP("MacroAssembler::call_VM_base: java_thread not callee saved?"); 1220 bind(L); 1221 } 1222 pop(rax); 1223 #endif 1224 1225 // reset last Java frame 1226 // Only interpreter should have to clear fp 1227 reset_last_Java_frame(true); 1228 1229 // C++ interp handles this in the interpreter 1230 check_and_handle_popframe(); 1231 check_and_handle_earlyret(); 1232 1233 if (check_exceptions) { 1234 // check for pending exceptions (java_thread is set upon return) 1235 cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD); 1236 // This used to conditionally jump to forward_exception however it is 1237 // possible if we relocate that the branch will not reach. 
    // So we must jump around so we can always reach.

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle.
  // call_VM does an intermediate call which places a return address on
  // the stack just under the stack pointer as the user finished with it.
  // This allows us to retrieve last_Java_pc from last_Java_sp[-1].

  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));

  call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2,
Register arg_3) { 1328 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 1329 assert_different_registers(arg_1, c_rarg2, c_rarg3); 1330 assert_different_registers(arg_2, c_rarg3); 1331 pass_arg3(this, arg_3); 1332 pass_arg2(this, arg_2); 1333 pass_arg1(this, arg_1); 1334 pass_arg0(this, arg_0); 1335 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1336 } 1337 1338 void MacroAssembler::get_vm_result_oop(Register oop_result) { 1339 movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset())); 1340 movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD); 1341 verify_oop_msg(oop_result, "broken oop in call_VM_base"); 1342 } 1343 1344 void MacroAssembler::get_vm_result_metadata(Register metadata_result) { 1345 movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset())); 1346 movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD); 1347 } 1348 1349 void MacroAssembler::check_and_handle_earlyret() { 1350 } 1351 1352 void MacroAssembler::check_and_handle_popframe() { 1353 } 1354 1355 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1356 assert(rscratch != noreg || always_reachable(src1), "missing"); 1357 1358 if (reachable(src1)) { 1359 cmpl(as_Address(src1), imm); 1360 } else { 1361 lea(rscratch, src1); 1362 cmpl(Address(rscratch, 0), imm); 1363 } 1364 } 1365 1366 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1367 assert(!src2.is_lval(), "use cmpptr"); 1368 assert(rscratch != noreg || always_reachable(src2), "missing"); 1369 1370 if (reachable(src2)) { 1371 cmpl(src1, as_Address(src2)); 1372 } else { 1373 lea(rscratch, src2); 1374 cmpl(src1, Address(rscratch, 0)); 1375 } 1376 } 1377 1378 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1379 Assembler::cmpl(src1, imm); 1380 } 1381 1382 void MacroAssembler::cmp32(Register src1, Address src2) { 1383 Assembler::cmpl(src1, src2); 1384 } 1385 1386 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1387 ucomisd(opr1, opr2); 1388 1389 Label L; 1390 if (unordered_is_less) { 1391 movl(dst, -1); 1392 jcc(Assembler::parity, L); 1393 jcc(Assembler::below , L); 1394 movl(dst, 0); 1395 jcc(Assembler::equal , L); 1396 increment(dst); 1397 } else { // unordered is greater 1398 movl(dst, 1); 1399 jcc(Assembler::parity, L); 1400 jcc(Assembler::above , L); 1401 movl(dst, 0); 1402 jcc(Assembler::equal , L); 1403 decrementl(dst); 1404 } 1405 bind(L); 1406 } 1407 1408 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1409 ucomiss(opr1, opr2); 1410 1411 Label L; 1412 if (unordered_is_less) { 1413 movl(dst, -1); 1414 jcc(Assembler::parity, L); 1415 jcc(Assembler::below , L); 1416 movl(dst, 0); 1417 jcc(Assembler::equal , L); 1418 increment(dst); 1419 } else { // unordered is greater 1420 movl(dst, 1); 1421 jcc(Assembler::parity, L); 1422 jcc(Assembler::above , L); 1423 movl(dst, 0); 1424 jcc(Assembler::equal , L); 1425 decrementl(dst); 1426 } 1427 bind(L); 1428 } 1429 1430 1431 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1432 assert(rscratch != noreg || always_reachable(src1), "missing"); 1433 1434 if (reachable(src1)) { 1435 cmpb(as_Address(src1), imm); 1436 } else { 1437 lea(rscratch, src1); 1438 cmpb(Address(rscratch, 0), imm); 1439 } 1440 } 1441 1442 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1443 
assert(rscratch != noreg || always_reachable(src2), "missing"); 1444 1445 if (src2.is_lval()) { 1446 movptr(rscratch, src2); 1447 Assembler::cmpq(src1, rscratch); 1448 } else if (reachable(src2)) { 1449 cmpq(src1, as_Address(src2)); 1450 } else { 1451 lea(rscratch, src2); 1452 Assembler::cmpq(src1, Address(rscratch, 0)); 1453 } 1454 } 1455 1456 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1457 assert(src2.is_lval(), "not a mem-mem compare"); 1458 // moves src2's literal address 1459 movptr(rscratch, src2); 1460 Assembler::cmpq(src1, rscratch); 1461 } 1462 1463 void MacroAssembler::cmpoop(Register src1, Register src2) { 1464 cmpptr(src1, src2); 1465 } 1466 1467 void MacroAssembler::cmpoop(Register src1, Address src2) { 1468 cmpptr(src1, src2); 1469 } 1470 1471 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1472 movoop(rscratch, src2); 1473 cmpptr(src1, rscratch); 1474 } 1475 1476 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1477 assert(rscratch != noreg || always_reachable(adr), "missing"); 1478 1479 if (reachable(adr)) { 1480 lock(); 1481 cmpxchgptr(reg, as_Address(adr)); 1482 } else { 1483 lea(rscratch, adr); 1484 lock(); 1485 cmpxchgptr(reg, Address(rscratch, 0)); 1486 } 1487 } 1488 1489 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1490 cmpxchgq(reg, adr); 1491 } 1492 1493 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1494 assert(rscratch != noreg || always_reachable(src), "missing"); 1495 1496 if (reachable(src)) { 1497 Assembler::comisd(dst, as_Address(src)); 1498 } else { 1499 lea(rscratch, src); 1500 Assembler::comisd(dst, Address(rscratch, 0)); 1501 } 1502 } 1503 1504 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1505 assert(rscratch != noreg || always_reachable(src), "missing"); 1506 1507 if (reachable(src)) { 1508 Assembler::comiss(dst, as_Address(src)); 1509 } else { 1510 lea(rscratch, src); 1511 Assembler::comiss(dst, Address(rscratch, 0)); 1512 } 1513 } 1514 1515 1516 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1517 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1518 1519 Condition negated_cond = negate_condition(cond); 1520 Label L; 1521 jcc(negated_cond, L); 1522 pushf(); // Preserve flags 1523 atomic_incl(counter_addr, rscratch); 1524 popf(); 1525 bind(L); 1526 } 1527 1528 int MacroAssembler::corrected_idivl(Register reg) { 1529 // Full implementation of Java idiv and irem; checks for 1530 // special case as described in JVM spec., p.243 & p.271. 1531 // The function returns the (pc) offset of the idivl 1532 // instruction - may be needed for implicit exceptions. 
1533 // 1534 // normal case special case 1535 // 1536 // input : rax,: dividend min_int 1537 // reg: divisor (may not be rax,/rdx) -1 1538 // 1539 // output: rax,: quotient (= rax, idiv reg) min_int 1540 // rdx: remainder (= rax, irem reg) 0 1541 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1542 const int min_int = 0x80000000; 1543 Label normal_case, special_case; 1544 1545 // check for special case 1546 cmpl(rax, min_int); 1547 jcc(Assembler::notEqual, normal_case); 1548 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1549 cmpl(reg, -1); 1550 jcc(Assembler::equal, special_case); 1551 1552 // handle normal case 1553 bind(normal_case); 1554 cdql(); 1555 int idivl_offset = offset(); 1556 idivl(reg); 1557 1558 // normal and special case exit 1559 bind(special_case); 1560 1561 return idivl_offset; 1562 } 1563 1564 1565 1566 void MacroAssembler::decrementl(Register reg, int value) { 1567 if (value == min_jint) {subl(reg, value) ; return; } 1568 if (value < 0) { incrementl(reg, -value); return; } 1569 if (value == 0) { ; return; } 1570 if (value == 1 && UseIncDec) { decl(reg) ; return; } 1571 /* else */ { subl(reg, value) ; return; } 1572 } 1573 1574 void MacroAssembler::decrementl(Address dst, int value) { 1575 if (value == min_jint) {subl(dst, value) ; return; } 1576 if (value < 0) { incrementl(dst, -value); return; } 1577 if (value == 0) { ; return; } 1578 if (value == 1 && UseIncDec) { decl(dst) ; return; } 1579 /* else */ { subl(dst, value) ; return; } 1580 } 1581 1582 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 1583 assert(shift_value > 0, "illegal shift value"); 1584 Label _is_positive; 1585 testl (reg, reg); 1586 jcc (Assembler::positive, _is_positive); 1587 int offset = (1 << shift_value) - 1 ; 1588 1589 if (offset == 1) { 1590 incrementl(reg); 1591 } else { 1592 addl(reg, offset); 1593 } 1594 1595 bind (_is_positive); 1596 sarl(reg, shift_value); 1597 } 1598 1599 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1600 assert(rscratch != noreg || always_reachable(src), "missing"); 1601 1602 if (reachable(src)) { 1603 Assembler::divsd(dst, as_Address(src)); 1604 } else { 1605 lea(rscratch, src); 1606 Assembler::divsd(dst, Address(rscratch, 0)); 1607 } 1608 } 1609 1610 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1611 assert(rscratch != noreg || always_reachable(src), "missing"); 1612 1613 if (reachable(src)) { 1614 Assembler::divss(dst, as_Address(src)); 1615 } else { 1616 lea(rscratch, src); 1617 Assembler::divss(dst, Address(rscratch, 0)); 1618 } 1619 } 1620 1621 void MacroAssembler::enter() { 1622 push(rbp); 1623 mov(rbp, rsp); 1624 } 1625 1626 void MacroAssembler::post_call_nop() { 1627 if (!Continuations::enabled()) { 1628 return; 1629 } 1630 InstructionMark im(this); 1631 relocate(post_call_nop_Relocation::spec()); 1632 InlineSkippedInstructionsCounter skipCounter(this); 1633 emit_int8((uint8_t)0x0f); 1634 emit_int8((uint8_t)0x1f); 1635 emit_int8((uint8_t)0x84); 1636 emit_int8((uint8_t)0x00); 1637 emit_int32(0x00); 1638 } 1639 1640 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1641 assert(rscratch != noreg || always_reachable(src), "missing"); 1642 if (reachable(src)) { 1643 Assembler::mulpd(dst, as_Address(src)); 1644 } else { 1645 lea(rscratch, src); 1646 Assembler::mulpd(dst, Address(rscratch, 0)); 1647 } 1648 } 1649 1650 // dst = c = a * b + c 1651 void 
MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 1652 Assembler::vfmadd231sd(c, a, b); 1653 if (dst != c) { 1654 movdbl(dst, c); 1655 } 1656 } 1657 1658 // dst = c = a * b + c 1659 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 1660 Assembler::vfmadd231ss(c, a, b); 1661 if (dst != c) { 1662 movflt(dst, c); 1663 } 1664 } 1665 1666 // dst = c = a * b + c 1667 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 1668 Assembler::vfmadd231pd(c, a, b, vector_len); 1669 if (dst != c) { 1670 vmovdqu(dst, c); 1671 } 1672 } 1673 1674 // dst = c = a * b + c 1675 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 1676 Assembler::vfmadd231ps(c, a, b, vector_len); 1677 if (dst != c) { 1678 vmovdqu(dst, c); 1679 } 1680 } 1681 1682 // dst = c = a * b + c 1683 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 1684 Assembler::vfmadd231pd(c, a, b, vector_len); 1685 if (dst != c) { 1686 vmovdqu(dst, c); 1687 } 1688 } 1689 1690 // dst = c = a * b + c 1691 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 1692 Assembler::vfmadd231ps(c, a, b, vector_len); 1693 if (dst != c) { 1694 vmovdqu(dst, c); 1695 } 1696 } 1697 1698 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 1699 assert(rscratch != noreg || always_reachable(dst), "missing"); 1700 1701 if (reachable(dst)) { 1702 incrementl(as_Address(dst)); 1703 } else { 1704 lea(rscratch, dst); 1705 incrementl(Address(rscratch, 0)); 1706 } 1707 } 1708 1709 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 1710 incrementl(as_Address(dst, rscratch)); 1711 } 1712 1713 void MacroAssembler::incrementl(Register reg, int value) { 1714 if (value == min_jint) {addl(reg, value) ; return; } 1715 if (value < 0) { decrementl(reg, -value); return; } 1716 if (value == 0) { ; return; } 1717 if (value == 1 && UseIncDec) { incl(reg) ; return; } 1718 /* else */ { addl(reg, value) ; return; } 1719 } 1720 1721 void MacroAssembler::incrementl(Address dst, int value) { 1722 if (value == min_jint) {addl(dst, value) ; return; } 1723 if (value < 0) { decrementl(dst, -value); return; } 1724 if (value == 0) { ; return; } 1725 if (value == 1 && UseIncDec) { incl(dst) ; return; } 1726 /* else */ { addl(dst, value) ; return; } 1727 } 1728 1729 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 1730 assert(rscratch != noreg || always_reachable(dst), "missing"); 1731 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 1732 if (reachable(dst)) { 1733 jmp_literal(dst.target(), dst.rspec()); 1734 } else { 1735 lea(rscratch, dst); 1736 jmp(rscratch); 1737 } 1738 } 1739 1740 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 1741 assert(rscratch != noreg || always_reachable(dst), "missing"); 1742 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 1743 if (reachable(dst)) { 1744 InstructionMark im(this); 1745 relocate(dst.reloc()); 1746 const int short_size = 2; 1747 const int long_size = 6; 1748 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 1749 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 1750 // 0111 tttn #8-bit disp 1751 emit_int8(0x70 | cc); 1752 emit_int8((offs - short_size) & 0xFF); 1753 } else { 1754 // 0000 1111 1000 
tttn #32-bit disp 1755 emit_int8(0x0F); 1756 emit_int8((unsigned char)(0x80 | cc)); 1757 emit_int32(offs - long_size); 1758 } 1759 } else { 1760 #ifdef ASSERT 1761 warning("reversing conditional branch"); 1762 #endif /* ASSERT */ 1763 Label skip; 1764 jccb(reverse[cc], skip); 1765 lea(rscratch, dst); 1766 Assembler::jmp(rscratch); 1767 bind(skip); 1768 } 1769 } 1770 1771 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) { 1772 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); 1773 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing"); 1774 1775 stmxcsr(mxcsr_save); 1776 movl(tmp, mxcsr_save); 1777 if (EnableX86ECoreOpts) { 1778 // The mxcsr_std has status bits set for performance on ECore 1779 orl(tmp, 0x003f); 1780 } else { 1781 // Mask out status bits (only check control and mask bits) 1782 andl(tmp, 0xFFC0); 1783 } 1784 cmp32(tmp, mxcsr_std, rscratch); 1785 } 1786 1787 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 1788 assert(rscratch != noreg || always_reachable(src), "missing"); 1789 1790 if (reachable(src)) { 1791 Assembler::ldmxcsr(as_Address(src)); 1792 } else { 1793 lea(rscratch, src); 1794 Assembler::ldmxcsr(Address(rscratch, 0)); 1795 } 1796 } 1797 1798 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1799 int off = offset(); 1800 movsbl(dst, src); // movsxb 1801 return off; 1802 } 1803 1804 // Note: load_signed_short used to be called load_signed_word. 1805 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 1806 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 1807 // The term "word" in HotSpot means a 32- or 64-bit machine word. 1808 int MacroAssembler::load_signed_short(Register dst, Address src) { 1809 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 1810 // version but this is what 64bit has always done. This seems to imply 1811 // that users are only using 32bits worth. 1812 int off = offset(); 1813 movswl(dst, src); // movsxw 1814 return off; 1815 } 1816 1817 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1818 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1819 // and "3.9 Partial Register Penalties", p. 22). 1820 int off = offset(); 1821 movzbl(dst, src); // movzxb 1822 return off; 1823 } 1824 1825 // Note: load_unsigned_short used to be called load_unsigned_word. 1826 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1827 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1828 // and "3.9 Partial Register Penalties", p. 22). 1829 int off = offset(); 1830 movzwl(dst, src); // movzxw 1831 return off; 1832 } 1833 1834 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1835 switch (size_in_bytes) { 1836 case 8: movq(dst, src); break; 1837 case 4: movl(dst, src); break; 1838 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1839 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1840 default: ShouldNotReachHere(); 1841 } 1842 } 1843 1844 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1845 switch (size_in_bytes) { 1846 case 8: movq(dst, src); break; 1847 case 4: movl(dst, src); break; 1848 case 2: movw(dst, src); break; 1849 case 1: movb(dst, src); break; 1850 default: ShouldNotReachHere(); 1851 } 1852 } 1853 1854 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 1855 assert(rscratch != noreg || always_reachable(dst), "missing"); 1856 1857 if (reachable(dst)) { 1858 movl(as_Address(dst), src); 1859 } else { 1860 lea(rscratch, dst); 1861 movl(Address(rscratch, 0), src); 1862 } 1863 } 1864 1865 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 1866 if (reachable(src)) { 1867 movl(dst, as_Address(src)); 1868 } else { 1869 lea(dst, src); 1870 movl(dst, Address(dst, 0)); 1871 } 1872 } 1873 1874 // C++ bool manipulation 1875 1876 void MacroAssembler::movbool(Register dst, Address src) { 1877 if(sizeof(bool) == 1) 1878 movb(dst, src); 1879 else if(sizeof(bool) == 2) 1880 movw(dst, src); 1881 else if(sizeof(bool) == 4) 1882 movl(dst, src); 1883 else 1884 // unsupported 1885 ShouldNotReachHere(); 1886 } 1887 1888 void MacroAssembler::movbool(Address dst, bool boolconst) { 1889 if(sizeof(bool) == 1) 1890 movb(dst, (int) boolconst); 1891 else if(sizeof(bool) == 2) 1892 movw(dst, (int) boolconst); 1893 else if(sizeof(bool) == 4) 1894 movl(dst, (int) boolconst); 1895 else 1896 // unsupported 1897 ShouldNotReachHere(); 1898 } 1899 1900 void MacroAssembler::movbool(Address dst, Register src) { 1901 if(sizeof(bool) == 1) 1902 movb(dst, src); 1903 else if(sizeof(bool) == 2) 1904 movw(dst, src); 1905 else if(sizeof(bool) == 4) 1906 movl(dst, src); 1907 else 1908 // unsupported 1909 ShouldNotReachHere(); 1910 } 1911 1912 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1913 assert(rscratch != noreg || always_reachable(src), "missing"); 1914 1915 if (reachable(src)) { 1916 movdl(dst, as_Address(src)); 1917 } else { 1918 lea(rscratch, src); 1919 movdl(dst, Address(rscratch, 0)); 1920 } 1921 } 1922 1923 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 1924 assert(rscratch != noreg || always_reachable(src), "missing"); 1925 1926 if (reachable(src)) { 1927 movq(dst, as_Address(src)); 1928 } else { 1929 lea(rscratch, src); 1930 movq(dst, Address(rscratch, 0)); 1931 } 1932 } 1933 1934 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1935 assert(rscratch != noreg || always_reachable(src), "missing"); 1936 1937 if (reachable(src)) { 1938 if (UseXmmLoadAndClearUpper) { 1939 movsd (dst, as_Address(src)); 1940 } else { 1941 movlpd(dst, as_Address(src)); 1942 } 1943 } else { 1944 lea(rscratch, src); 1945 if (UseXmmLoadAndClearUpper) { 1946 movsd (dst, Address(rscratch, 0)); 1947 } else { 1948 movlpd(dst, Address(rscratch, 0)); 1949 } 1950 } 1951 } 1952 1953 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 1954 assert(rscratch != noreg || always_reachable(src), "missing"); 1955 1956 if (reachable(src)) { 1957 movss(dst, as_Address(src)); 1958 } else { 1959 lea(rscratch, src); 1960 movss(dst, Address(rscratch, 0)); 1961 } 1962 } 1963 1964 void MacroAssembler::movptr(Register dst, Register src) { 1965 movq(dst, src); 1966 } 1967 1968 void MacroAssembler::movptr(Register dst, Address src) { 
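  // All of the movptr() variants move pointer-sized (64-bit) values. Note that
  // the intptr_t form further below picks the shortest encoding for the
  // immediate: movl when the value fits in an unsigned 32 bits (implicitly
  // zero-extended), movq with a sign-extended imm32 when it fits in a signed
  // 32 bits, and a full mov64 otherwise.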
1969 movq(dst, src); 1970 } 1971 1972 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 1973 void MacroAssembler::movptr(Register dst, intptr_t src) { 1974 if (is_uimm32(src)) { 1975 movl(dst, checked_cast<uint32_t>(src)); 1976 } else if (is_simm32(src)) { 1977 movq(dst, checked_cast<int32_t>(src)); 1978 } else { 1979 mov64(dst, src); 1980 } 1981 } 1982 1983 void MacroAssembler::movptr(Address dst, Register src) { 1984 movq(dst, src); 1985 } 1986 1987 void MacroAssembler::movptr(Address dst, int32_t src) { 1988 movslq(dst, src); 1989 } 1990 1991 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 1992 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1993 Assembler::movdqu(dst, src); 1994 } 1995 1996 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 1997 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1998 Assembler::movdqu(dst, src); 1999 } 2000 2001 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2002 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2003 Assembler::movdqu(dst, src); 2004 } 2005 2006 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2007 assert(rscratch != noreg || always_reachable(src), "missing"); 2008 2009 if (reachable(src)) { 2010 movdqu(dst, as_Address(src)); 2011 } else { 2012 lea(rscratch, src); 2013 movdqu(dst, Address(rscratch, 0)); 2014 } 2015 } 2016 2017 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2018 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2019 Assembler::vmovdqu(dst, src); 2020 } 2021 2022 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2023 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2024 Assembler::vmovdqu(dst, src); 2025 } 2026 2027 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2028 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2029 Assembler::vmovdqu(dst, src); 2030 } 2031 2032 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2033 assert(rscratch != noreg || always_reachable(src), "missing"); 2034 2035 if (reachable(src)) { 2036 vmovdqu(dst, as_Address(src)); 2037 } 2038 else { 2039 lea(rscratch, src); 2040 vmovdqu(dst, Address(rscratch, 0)); 2041 } 2042 } 2043 2044 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2045 assert(rscratch != noreg || always_reachable(src), "missing"); 2046 2047 if (vector_len == AVX_512bit) { 2048 evmovdquq(dst, src, AVX_512bit, rscratch); 2049 } else if (vector_len == AVX_256bit) { 2050 vmovdqu(dst, src, rscratch); 2051 } else { 2052 movdqu(dst, src, rscratch); 2053 } 2054 } 2055 2056 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) { 2057 if (vector_len == AVX_512bit) { 2058 evmovdquq(dst, src, AVX_512bit); 2059 } else if (vector_len == AVX_256bit) { 2060 vmovdqu(dst, src); 2061 } else { 2062 movdqu(dst, src); 2063 } 2064 } 2065 2066 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) { 2067 if (vector_len == AVX_512bit) { 2068 evmovdquq(dst, src, AVX_512bit); 2069 } else if (vector_len == AVX_256bit) { 2070 vmovdqu(dst, src); 2071 } else { 2072 movdqu(dst, src); 2073 } 2074 } 
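// The vector_len dispatching overloads in this group (above and below) all
// follow the same rule: AVX_512bit requests are routed to the EVEX-encoded
// evmovdquq, AVX_256bit to the 256-bit VEX-encoded vmovdqu, and anything
// narrower falls back to the 128-bit movdqu. A minimal usage sketch, with the
// register and address chosen purely for illustration:
//
//   vmovdqu(xmm1, Address(rsp, 0), Assembler::AVX_256bit);  // 32-byte load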
2075 2076 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) { 2077 if (vector_len == AVX_512bit) { 2078 evmovdquq(dst, src, AVX_512bit); 2079 } else if (vector_len == AVX_256bit) { 2080 vmovdqu(dst, src); 2081 } else { 2082 movdqu(dst, src); 2083 } 2084 } 2085 2086 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2087 assert(rscratch != noreg || always_reachable(src), "missing"); 2088 2089 if (reachable(src)) { 2090 vmovdqa(dst, as_Address(src)); 2091 } 2092 else { 2093 lea(rscratch, src); 2094 vmovdqa(dst, Address(rscratch, 0)); 2095 } 2096 } 2097 2098 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2099 assert(rscratch != noreg || always_reachable(src), "missing"); 2100 2101 if (vector_len == AVX_512bit) { 2102 evmovdqaq(dst, src, AVX_512bit, rscratch); 2103 } else if (vector_len == AVX_256bit) { 2104 vmovdqa(dst, src, rscratch); 2105 } else { 2106 movdqa(dst, src, rscratch); 2107 } 2108 } 2109 2110 void MacroAssembler::kmov(KRegister dst, Address src) { 2111 if (VM_Version::supports_avx512bw()) { 2112 kmovql(dst, src); 2113 } else { 2114 assert(VM_Version::supports_evex(), ""); 2115 kmovwl(dst, src); 2116 } 2117 } 2118 2119 void MacroAssembler::kmov(Address dst, KRegister src) { 2120 if (VM_Version::supports_avx512bw()) { 2121 kmovql(dst, src); 2122 } else { 2123 assert(VM_Version::supports_evex(), ""); 2124 kmovwl(dst, src); 2125 } 2126 } 2127 2128 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2129 if (VM_Version::supports_avx512bw()) { 2130 kmovql(dst, src); 2131 } else { 2132 assert(VM_Version::supports_evex(), ""); 2133 kmovwl(dst, src); 2134 } 2135 } 2136 2137 void MacroAssembler::kmov(Register dst, KRegister src) { 2138 if (VM_Version::supports_avx512bw()) { 2139 kmovql(dst, src); 2140 } else { 2141 assert(VM_Version::supports_evex(), ""); 2142 kmovwl(dst, src); 2143 } 2144 } 2145 2146 void MacroAssembler::kmov(KRegister dst, Register src) { 2147 if (VM_Version::supports_avx512bw()) { 2148 kmovql(dst, src); 2149 } else { 2150 assert(VM_Version::supports_evex(), ""); 2151 kmovwl(dst, src); 2152 } 2153 } 2154 2155 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2156 assert(rscratch != noreg || always_reachable(src), "missing"); 2157 2158 if (reachable(src)) { 2159 kmovql(dst, as_Address(src)); 2160 } else { 2161 lea(rscratch, src); 2162 kmovql(dst, Address(rscratch, 0)); 2163 } 2164 } 2165 2166 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2167 assert(rscratch != noreg || always_reachable(src), "missing"); 2168 2169 if (reachable(src)) { 2170 kmovwl(dst, as_Address(src)); 2171 } else { 2172 lea(rscratch, src); 2173 kmovwl(dst, Address(rscratch, 0)); 2174 } 2175 } 2176 2177 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2178 int vector_len, Register rscratch) { 2179 assert(rscratch != noreg || always_reachable(src), "missing"); 2180 2181 if (reachable(src)) { 2182 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2183 } else { 2184 lea(rscratch, src); 2185 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2186 } 2187 } 2188 2189 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2190 int vector_len, Register rscratch) { 2191 assert(rscratch != noreg || always_reachable(src), "missing"); 2192 2193 if (reachable(src)) { 2194 Assembler::evmovdquw(dst, 
mask, as_Address(src), merge, vector_len); 2195 } else { 2196 lea(rscratch, src); 2197 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2198 } 2199 } 2200 2201 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2202 assert(rscratch != noreg || always_reachable(src), "missing"); 2203 2204 if (reachable(src)) { 2205 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2206 } else { 2207 lea(rscratch, src); 2208 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2209 } 2210 } 2211 2212 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2213 assert(rscratch != noreg || always_reachable(src), "missing"); 2214 2215 if (reachable(src)) { 2216 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2217 } else { 2218 lea(rscratch, src); 2219 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2220 } 2221 } 2222 2223 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2224 assert(rscratch != noreg || always_reachable(src), "missing"); 2225 2226 if (reachable(src)) { 2227 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2228 } else { 2229 lea(rscratch, src); 2230 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2231 } 2232 } 2233 2234 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2235 assert(rscratch != noreg || always_reachable(src), "missing"); 2236 2237 if (reachable(src)) { 2238 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len); 2239 } else { 2240 lea(rscratch, src); 2241 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len); 2242 } 2243 } 2244 2245 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2246 assert(rscratch != noreg || always_reachable(src), "missing"); 2247 2248 if (reachable(src)) { 2249 Assembler::evmovdqaq(dst, as_Address(src), vector_len); 2250 } else { 2251 lea(rscratch, src); 2252 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len); 2253 } 2254 } 2255 2256 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2257 assert(rscratch != noreg || always_reachable(src), "missing"); 2258 2259 if (reachable(src)) { 2260 Assembler::movapd(dst, as_Address(src)); 2261 } else { 2262 lea(rscratch, src); 2263 Assembler::movapd(dst, Address(rscratch, 0)); 2264 } 2265 } 2266 2267 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2268 assert(rscratch != noreg || always_reachable(src), "missing"); 2269 2270 if (reachable(src)) { 2271 Assembler::movdqa(dst, as_Address(src)); 2272 } else { 2273 lea(rscratch, src); 2274 Assembler::movdqa(dst, Address(rscratch, 0)); 2275 } 2276 } 2277 2278 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2279 assert(rscratch != noreg || always_reachable(src), "missing"); 2280 2281 if (reachable(src)) { 2282 Assembler::movsd(dst, as_Address(src)); 2283 } else { 2284 lea(rscratch, src); 2285 Assembler::movsd(dst, Address(rscratch, 0)); 2286 } 2287 } 2288 2289 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2290 assert(rscratch != noreg || always_reachable(src), "missing"); 2291 2292 if (reachable(src)) { 2293 
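    // RIP-relative fast path. This reachable()/lea() split is the standard
    // pattern for AddressLiteral operands on x86_64: a literal within +/-2GB
    // of the code being emitted is encoded directly as a RIP-relative operand,
    // otherwise its address is first materialized into the caller-supplied
    // scratch register (the else branch below).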
Assembler::movss(dst, as_Address(src)); 2294 } else { 2295 lea(rscratch, src); 2296 Assembler::movss(dst, Address(rscratch, 0)); 2297 } 2298 } 2299 2300 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2301 assert(rscratch != noreg || always_reachable(src), "missing"); 2302 2303 if (reachable(src)) { 2304 Assembler::movddup(dst, as_Address(src)); 2305 } else { 2306 lea(rscratch, src); 2307 Assembler::movddup(dst, Address(rscratch, 0)); 2308 } 2309 } 2310 2311 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2312 assert(rscratch != noreg || always_reachable(src), "missing"); 2313 2314 if (reachable(src)) { 2315 Assembler::vmovddup(dst, as_Address(src), vector_len); 2316 } else { 2317 lea(rscratch, src); 2318 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2319 } 2320 } 2321 2322 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2323 assert(rscratch != noreg || always_reachable(src), "missing"); 2324 2325 if (reachable(src)) { 2326 Assembler::mulsd(dst, as_Address(src)); 2327 } else { 2328 lea(rscratch, src); 2329 Assembler::mulsd(dst, Address(rscratch, 0)); 2330 } 2331 } 2332 2333 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2334 assert(rscratch != noreg || always_reachable(src), "missing"); 2335 2336 if (reachable(src)) { 2337 Assembler::mulss(dst, as_Address(src)); 2338 } else { 2339 lea(rscratch, src); 2340 Assembler::mulss(dst, Address(rscratch, 0)); 2341 } 2342 } 2343 2344 void MacroAssembler::null_check(Register reg, int offset) { 2345 if (needs_explicit_null_check(offset)) { 2346 // provoke OS null exception if reg is null by 2347 // accessing M[reg] w/o changing any (non-CC) registers 2348 // NOTE: cmpl is plenty here to provoke a segv 2349 cmpptr(rax, Address(reg, 0)); 2350 // Note: should probably use testl(rax, Address(reg, 0)); 2351 // may be shorter code (however, this version of 2352 // testl needs to be implemented first) 2353 } else { 2354 // nothing to do, (later) access of M[reg + offset] 2355 // will provoke OS null exception if reg is null 2356 } 2357 } 2358 2359 void MacroAssembler::os_breakpoint() { 2360 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 2361 // (e.g., MSVC can't call ps() otherwise) 2362 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 2363 } 2364 2365 void MacroAssembler::unimplemented(const char* what) { 2366 const char* buf = nullptr; 2367 { 2368 ResourceMark rm; 2369 stringStream ss; 2370 ss.print("unimplemented: %s", what); 2371 buf = code_string(ss.as_string()); 2372 } 2373 stop(buf); 2374 } 2375 2376 #define XSTATE_BV 0x200 2377 2378 void MacroAssembler::pop_CPU_state() { 2379 pop_FPU_state(); 2380 pop_IU_state(); 2381 } 2382 2383 void MacroAssembler::pop_FPU_state() { 2384 fxrstor(Address(rsp, 0)); 2385 addptr(rsp, FPUStateSizeInWords * wordSize); 2386 } 2387 2388 void MacroAssembler::pop_IU_state() { 2389 popa(); 2390 addq(rsp, 8); 2391 popf(); 2392 } 2393 2394 // Save Integer and Float state 2395 // Warning: Stack must be 16 byte aligned (64bit) 2396 void MacroAssembler::push_CPU_state() { 2397 push_IU_state(); 2398 push_FPU_state(); 2399 } 2400 2401 void MacroAssembler::push_FPU_state() { 2402 subptr(rsp, FPUStateSizeInWords * wordSize); 2403 fxsave(Address(rsp, 0)); 2404 } 2405 2406 void MacroAssembler::push_IU_state() { 2407 // Push flags first because pusha kills them 2408 pushf(); 2409 // Make sure rsp 
stays 16-byte aligned 2410 subq(rsp, 8); 2411 pusha(); 2412 } 2413 2414 void MacroAssembler::push_cont_fastpath() { 2415 if (!Continuations::enabled()) return; 2416 2417 Label L_done; 2418 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset())); 2419 jccb(Assembler::belowEqual, L_done); 2420 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp); 2421 bind(L_done); 2422 } 2423 2424 void MacroAssembler::pop_cont_fastpath() { 2425 if (!Continuations::enabled()) return; 2426 2427 Label L_done; 2428 cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset())); 2429 jccb(Assembler::below, L_done); 2430 movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0); 2431 bind(L_done); 2432 } 2433 2434 void MacroAssembler::inc_held_monitor_count() { 2435 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 2436 } 2437 2438 void MacroAssembler::dec_held_monitor_count() { 2439 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 2440 } 2441 2442 #ifdef ASSERT 2443 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 2444 Label no_cont; 2445 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 2446 testl(cont, cont); 2447 jcc(Assembler::zero, no_cont); 2448 stop(name); 2449 bind(no_cont); 2450 } 2451 #endif 2452 2453 void MacroAssembler::reset_last_Java_frame(bool clear_fp) { // determine java_thread register 2454 // we must set sp to zero to clear frame 2455 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 2456 // must clear fp, so that compiled frames are not confused; it is 2457 // possible that we need it only for debugging 2458 if (clear_fp) { 2459 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 2460 } 2461 // Always clear the pc because it could have been set by make_walkable() 2462 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 2463 vzeroupper(); 2464 } 2465 2466 void MacroAssembler::round_to(Register reg, int modulus) { 2467 addptr(reg, modulus - 1); 2468 andptr(reg, -modulus); 2469 } 2470 2471 void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) { 2472 if (at_return) { 2473 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 2474 // we may safely use rsp instead to perform the stack watermark check. 2475 cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset())); 2476 jcc(Assembler::above, slow_path); 2477 return; 2478 } 2479 testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 2480 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 2481 } 2482 2483 // Calls to C land 2484 // 2485 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 2486 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 2487 // has to be reset to 0. This is required to allow proper stack traversal. 
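//
// Expressed as plain stores (names approximate; the anchor is the
// JavaFrameAnchor embedded in the JavaThread), the publication protocol is
// roughly:
//
//   thread->_anchor._last_Java_fp = fp;   // optional
//   thread->_anchor._last_Java_pc = pc;   // optional
//   thread->_anchor._last_Java_sp = sp;   // written last: publishes the anchor
//
// reset_last_Java_frame() above clears sp before the other fields, so a stack
// walker never observes a partially written anchor.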
2488 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2489 Register last_java_fp, 2490 address last_java_pc, 2491 Register rscratch) { 2492 vzeroupper(); 2493 // determine last_java_sp register 2494 if (!last_java_sp->is_valid()) { 2495 last_java_sp = rsp; 2496 } 2497 // last_java_fp is optional 2498 if (last_java_fp->is_valid()) { 2499 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 2500 } 2501 // last_java_pc is optional 2502 if (last_java_pc != nullptr) { 2503 Address java_pc(r15_thread, 2504 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 2505 lea(java_pc, InternalAddress(last_java_pc), rscratch); 2506 } 2507 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 2508 } 2509 2510 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2511 Register last_java_fp, 2512 Label &L, 2513 Register scratch) { 2514 lea(scratch, L); 2515 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch); 2516 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch); 2517 } 2518 2519 void MacroAssembler::shlptr(Register dst, int imm8) { 2520 shlq(dst, imm8); 2521 } 2522 2523 void MacroAssembler::shrptr(Register dst, int imm8) { 2524 shrq(dst, imm8); 2525 } 2526 2527 void MacroAssembler::sign_extend_byte(Register reg) { 2528 movsbl(reg, reg); // movsxb 2529 } 2530 2531 void MacroAssembler::sign_extend_short(Register reg) { 2532 movswl(reg, reg); // movsxw 2533 } 2534 2535 void MacroAssembler::testl(Address dst, int32_t imm32) { 2536 if (imm32 >= 0 && is8bit(imm32)) { 2537 testb(dst, imm32); 2538 } else { 2539 Assembler::testl(dst, imm32); 2540 } 2541 } 2542 2543 void MacroAssembler::testl(Register dst, int32_t imm32) { 2544 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 2545 testb(dst, imm32); 2546 } else { 2547 Assembler::testl(dst, imm32); 2548 } 2549 } 2550 2551 void MacroAssembler::testl(Register dst, AddressLiteral src) { 2552 assert(always_reachable(src), "Address should be reachable"); 2553 testl(dst, as_Address(src)); 2554 } 2555 2556 void MacroAssembler::testq(Address dst, int32_t imm32) { 2557 if (imm32 >= 0) { 2558 testl(dst, imm32); 2559 } else { 2560 Assembler::testq(dst, imm32); 2561 } 2562 } 2563 2564 void MacroAssembler::testq(Register dst, int32_t imm32) { 2565 if (imm32 >= 0) { 2566 testl(dst, imm32); 2567 } else { 2568 Assembler::testq(dst, imm32); 2569 } 2570 } 2571 2572 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 2573 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2574 Assembler::pcmpeqb(dst, src); 2575 } 2576 2577 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 2578 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2579 Assembler::pcmpeqw(dst, src); 2580 } 2581 2582 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 2583 assert((dst->encoding() < 16),"XMM register should be 0-15"); 2584 Assembler::pcmpestri(dst, src, imm8); 2585 } 2586 2587 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 2588 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2589 Assembler::pcmpestri(dst, src, imm8); 2590 } 2591 2592 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 2593 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM 
register should be 0-15"); 2594 Assembler::pmovzxbw(dst, src); 2595 } 2596 2597 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 2598 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2599 Assembler::pmovzxbw(dst, src); 2600 } 2601 2602 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 2603 assert((src->encoding() < 16),"XMM register should be 0-15"); 2604 Assembler::pmovmskb(dst, src); 2605 } 2606 2607 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 2608 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2609 Assembler::ptest(dst, src); 2610 } 2611 2612 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2613 assert(rscratch != noreg || always_reachable(src), "missing"); 2614 2615 if (reachable(src)) { 2616 Assembler::sqrtss(dst, as_Address(src)); 2617 } else { 2618 lea(rscratch, src); 2619 Assembler::sqrtss(dst, Address(rscratch, 0)); 2620 } 2621 } 2622 2623 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2624 assert(rscratch != noreg || always_reachable(src), "missing"); 2625 2626 if (reachable(src)) { 2627 Assembler::subsd(dst, as_Address(src)); 2628 } else { 2629 lea(rscratch, src); 2630 Assembler::subsd(dst, Address(rscratch, 0)); 2631 } 2632 } 2633 2634 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 2635 assert(rscratch != noreg || always_reachable(src), "missing"); 2636 2637 if (reachable(src)) { 2638 Assembler::roundsd(dst, as_Address(src), rmode); 2639 } else { 2640 lea(rscratch, src); 2641 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 2642 } 2643 } 2644 2645 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2646 assert(rscratch != noreg || always_reachable(src), "missing"); 2647 2648 if (reachable(src)) { 2649 Assembler::subss(dst, as_Address(src)); 2650 } else { 2651 lea(rscratch, src); 2652 Assembler::subss(dst, Address(rscratch, 0)); 2653 } 2654 } 2655 2656 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2657 assert(rscratch != noreg || always_reachable(src), "missing"); 2658 2659 if (reachable(src)) { 2660 Assembler::ucomisd(dst, as_Address(src)); 2661 } else { 2662 lea(rscratch, src); 2663 Assembler::ucomisd(dst, Address(rscratch, 0)); 2664 } 2665 } 2666 2667 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2668 assert(rscratch != noreg || always_reachable(src), "missing"); 2669 2670 if (reachable(src)) { 2671 Assembler::ucomiss(dst, as_Address(src)); 2672 } else { 2673 lea(rscratch, src); 2674 Assembler::ucomiss(dst, Address(rscratch, 0)); 2675 } 2676 } 2677 2678 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2679 assert(rscratch != noreg || always_reachable(src), "missing"); 2680 2681 // Used in sign-bit flipping with aligned address. 
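  // (For example, xorpd against a constant holding 0x8000000000000000 in each
  // 64-bit lane flips only the sign bits, i.e. negates doubles in place. The
  // 16-byte alignment asserted below is required by the legacy SSE-encoded
  // memory form; the VEX/EVEX forms have no such restriction.)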
2682 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2683 2684 if (UseAVX > 2 && 2685 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2686 (dst->encoding() >= 16)) { 2687 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2688 } else if (reachable(src)) { 2689 Assembler::xorpd(dst, as_Address(src)); 2690 } else { 2691 lea(rscratch, src); 2692 Assembler::xorpd(dst, Address(rscratch, 0)); 2693 } 2694 } 2695 2696 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 2697 if (UseAVX > 2 && 2698 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2699 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2700 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2701 } else { 2702 Assembler::xorpd(dst, src); 2703 } 2704 } 2705 2706 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 2707 if (UseAVX > 2 && 2708 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2709 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2710 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2711 } else { 2712 Assembler::xorps(dst, src); 2713 } 2714 } 2715 2716 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 2717 assert(rscratch != noreg || always_reachable(src), "missing"); 2718 2719 // Used in sign-bit flipping with aligned address. 2720 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2721 2722 if (UseAVX > 2 && 2723 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2724 (dst->encoding() >= 16)) { 2725 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2726 } else if (reachable(src)) { 2727 Assembler::xorps(dst, as_Address(src)); 2728 } else { 2729 lea(rscratch, src); 2730 Assembler::xorps(dst, Address(rscratch, 0)); 2731 } 2732 } 2733 2734 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 2735 assert(rscratch != noreg || always_reachable(src), "missing"); 2736 2737 // Used in sign-bit flipping with aligned address. 
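  // (Here src is the byte-shuffle control mask rather than a sign mask; the
  // alignment concern below is the same, though, since it comes from the
  // legacy SSE-encoded memory operand and not from how the constant is used.)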
2738 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 2739 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 2740 if (reachable(src)) { 2741 Assembler::pshufb(dst, as_Address(src)); 2742 } else { 2743 lea(rscratch, src); 2744 Assembler::pshufb(dst, Address(rscratch, 0)); 2745 } 2746 } 2747 2748 // AVX 3-operands instructions 2749 2750 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2751 assert(rscratch != noreg || always_reachable(src), "missing"); 2752 2753 if (reachable(src)) { 2754 vaddsd(dst, nds, as_Address(src)); 2755 } else { 2756 lea(rscratch, src); 2757 vaddsd(dst, nds, Address(rscratch, 0)); 2758 } 2759 } 2760 2761 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2762 assert(rscratch != noreg || always_reachable(src), "missing"); 2763 2764 if (reachable(src)) { 2765 vaddss(dst, nds, as_Address(src)); 2766 } else { 2767 lea(rscratch, src); 2768 vaddss(dst, nds, Address(rscratch, 0)); 2769 } 2770 } 2771 2772 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2773 assert(UseAVX > 0, "requires some form of AVX"); 2774 assert(rscratch != noreg || always_reachable(src), "missing"); 2775 2776 if (reachable(src)) { 2777 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 2778 } else { 2779 lea(rscratch, src); 2780 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 2781 } 2782 } 2783 2784 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2785 assert(UseAVX > 0, "requires some form of AVX"); 2786 assert(rscratch != noreg || always_reachable(src), "missing"); 2787 2788 if (reachable(src)) { 2789 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 2790 } else { 2791 lea(rscratch, src); 2792 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 2793 } 2794 } 2795 2796 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2797 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2798 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2799 2800 vandps(dst, nds, negate_field, vector_len, rscratch); 2801 } 2802 2803 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2804 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2805 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2806 2807 vandpd(dst, nds, negate_field, vector_len, rscratch); 2808 } 2809 2810 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2811 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2812 Assembler::vpaddb(dst, nds, src, vector_len); 2813 } 2814 2815 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2816 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2817 Assembler::vpaddb(dst, nds, src, vector_len); 2818 } 2819 2820 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2821 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2822 Assembler::vpaddw(dst, nds, src, vector_len); 2823 } 2824 2825 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2826 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2827 Assembler::vpaddw(dst, nds, src, vector_len); 2828 } 2829 2830 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2831 assert(rscratch != noreg || always_reachable(src), "missing"); 2832 2833 if (reachable(src)) { 2834 Assembler::vpand(dst, nds, as_Address(src), vector_len); 2835 } else { 2836 lea(rscratch, src); 2837 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 2838 } 2839 } 2840 2841 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2842 assert(rscratch != noreg || always_reachable(src), "missing"); 2843 2844 if (reachable(src)) { 2845 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 2846 } else { 2847 lea(rscratch, src); 2848 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 2849 } 2850 } 2851 2852 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2853 assert(rscratch != noreg || always_reachable(src), "missing"); 2854 2855 if (reachable(src)) { 2856 Assembler::vbroadcasti128(dst, as_Address(src), vector_len); 2857 } else { 2858 lea(rscratch, src); 2859 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len); 2860 } 2861 } 2862 2863 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2864 assert(rscratch != noreg || always_reachable(src), "missing"); 2865 2866 if (reachable(src)) { 2867 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 2868 } else { 2869 lea(rscratch, src); 2870 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 2871 } 2872 } 2873 2874 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2875 assert(rscratch != noreg || always_reachable(src), "missing"); 2876 2877 if (reachable(src)) { 2878 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 2879 } else { 2880 lea(rscratch, src); 2881 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 2882 } 2883 } 2884 2885 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2886 assert(rscratch != noreg || always_reachable(src), "missing"); 2887 2888 if (reachable(src)) { 2889 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 2890 } else { 2891 lea(rscratch, src); 2892 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 2893 } 2894 } 2895 2896 // Vector float blend 2897 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2898 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2899 // WARN: Allow dst == (src1|src2), mask == scratch 2900 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2901 bool scratch_available = scratch != xnoreg 
&& scratch != src1 && scratch != src2 && scratch != dst; 2902 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2903 if (blend_emulation && scratch_available && dst_available) { 2904 if (compute_mask) { 2905 vpsrad(scratch, mask, 32, vector_len); 2906 mask = scratch; 2907 } 2908 if (dst == src1) { 2909 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1 2910 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2911 } else { 2912 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2913 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1 2914 } 2915 vpor(dst, dst, scratch, vector_len); 2916 } else { 2917 Assembler::vblendvps(dst, src1, src2, mask, vector_len); 2918 } 2919 } 2920 2921 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2922 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2923 // WARN: Allow dst == (src1|src2), mask == scratch 2924 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2925 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask); 2926 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2927 if (blend_emulation && scratch_available && dst_available) { 2928 if (compute_mask) { 2929 vpxor(scratch, scratch, scratch, vector_len); 2930 vpcmpgtq(scratch, scratch, mask, vector_len); 2931 mask = scratch; 2932 } 2933 if (dst == src1) { 2934 vpandn(dst, mask, src1, vector_len); // if mask == 0, src 2935 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2936 } else { 2937 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2938 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src 2939 } 2940 vpor(dst, dst, scratch, vector_len); 2941 } else { 2942 Assembler::vblendvpd(dst, src1, src2, mask, vector_len); 2943 } 2944 } 2945 2946 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2947 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2948 Assembler::vpcmpeqb(dst, nds, src, vector_len); 2949 } 2950 2951 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 2952 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2953 Assembler::vpcmpeqb(dst, src1, src2, vector_len); 2954 } 2955 2956 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2957 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2958 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2959 } 2960 2961 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2962 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2963 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2964 } 2965 2966 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2967 assert(rscratch != noreg || always_reachable(src), "missing"); 2968 2969 if (reachable(src)) { 2970 
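    // RIP-relative form: compares the packed dwords in nds with the constant
    // vector at src and writes the per-lane equality results into kdst, under
    // the opmask register 'mask'.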
Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 2971 } else { 2972 lea(rscratch, src); 2973 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 2974 } 2975 } 2976 2977 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2978 int comparison, bool is_signed, int vector_len, Register rscratch) { 2979 assert(rscratch != noreg || always_reachable(src), "missing"); 2980 2981 if (reachable(src)) { 2982 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2983 } else { 2984 lea(rscratch, src); 2985 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2986 } 2987 } 2988 2989 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2990 int comparison, bool is_signed, int vector_len, Register rscratch) { 2991 assert(rscratch != noreg || always_reachable(src), "missing"); 2992 2993 if (reachable(src)) { 2994 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2995 } else { 2996 lea(rscratch, src); 2997 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2998 } 2999 } 3000 3001 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3002 int comparison, bool is_signed, int vector_len, Register rscratch) { 3003 assert(rscratch != noreg || always_reachable(src), "missing"); 3004 3005 if (reachable(src)) { 3006 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3007 } else { 3008 lea(rscratch, src); 3009 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3010 } 3011 } 3012 3013 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3014 int comparison, bool is_signed, int vector_len, Register rscratch) { 3015 assert(rscratch != noreg || always_reachable(src), "missing"); 3016 3017 if (reachable(src)) { 3018 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3019 } else { 3020 lea(rscratch, src); 3021 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3022 } 3023 } 3024 3025 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3026 if (width == Assembler::Q) { 3027 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3028 } else { 3029 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3030 } 3031 } 3032 3033 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3034 int eq_cond_enc = 0x29; 3035 int gt_cond_enc = 0x37; 3036 if (width != Assembler::Q) { 3037 eq_cond_enc = 0x74 + width; 3038 gt_cond_enc = 0x64 + width; 3039 } 3040 switch (cond) { 3041 case eq: 3042 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3043 break; 3044 case neq: 3045 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3046 vallones(xtmp, vector_len); 3047 vpxor(dst, xtmp, dst, vector_len); 3048 break; 3049 case le: 3050 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3051 vallones(xtmp, vector_len); 3052 vpxor(dst, xtmp, dst, vector_len); 3053 break; 3054 case nlt: 3055 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3056 vallones(xtmp, vector_len); 3057 vpxor(dst, xtmp, dst, 
vector_len); 3058 break; 3059 case lt: 3060 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3061 break; 3062 case nle: 3063 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3064 break; 3065 default: 3066 assert(false, "Should not reach here"); 3067 } 3068 } 3069 3070 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3071 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3072 Assembler::vpmovzxbw(dst, src, vector_len); 3073 } 3074 3075 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3076 assert((src->encoding() < 16),"XMM register should be 0-15"); 3077 Assembler::vpmovmskb(dst, src, vector_len); 3078 } 3079 3080 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3081 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3082 Assembler::vpmullw(dst, nds, src, vector_len); 3083 } 3084 3085 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3086 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3087 Assembler::vpmullw(dst, nds, src, vector_len); 3088 } 3089 3090 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3091 assert((UseAVX > 0), "AVX support is needed"); 3092 assert(rscratch != noreg || always_reachable(src), "missing"); 3093 3094 if (reachable(src)) { 3095 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3096 } else { 3097 lea(rscratch, src); 3098 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3099 } 3100 } 3101 3102 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3103 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3104 Assembler::vpsubb(dst, nds, src, vector_len); 3105 } 3106 3107 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3108 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3109 Assembler::vpsubb(dst, nds, src, vector_len); 3110 } 3111 3112 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3113 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3114 Assembler::vpsubw(dst, nds, src, vector_len); 3115 } 3116 3117 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3118 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3119 Assembler::vpsubw(dst, nds, src, vector_len); 3120 } 3121 3122 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3123 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3124 Assembler::vpsraw(dst, nds, shift, vector_len); 3125 } 3126 3127 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3128 assert(((dst->encoding() < 16 && nds->encoding() < 16) || 
VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3129 Assembler::vpsraw(dst, nds, shift, vector_len); 3130 } 3131 3132 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3133 assert(UseAVX > 2,""); 3134 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3135 vector_len = 2; 3136 } 3137 Assembler::evpsraq(dst, nds, shift, vector_len); 3138 } 3139 3140 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3141 assert(UseAVX > 2,""); 3142 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3143 vector_len = 2; 3144 } 3145 Assembler::evpsraq(dst, nds, shift, vector_len); 3146 } 3147 3148 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3149 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3150 Assembler::vpsrlw(dst, nds, shift, vector_len); 3151 } 3152 3153 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3154 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3155 Assembler::vpsrlw(dst, nds, shift, vector_len); 3156 } 3157 3158 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3159 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3160 Assembler::vpsllw(dst, nds, shift, vector_len); 3161 } 3162 3163 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3164 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3165 Assembler::vpsllw(dst, nds, shift, vector_len); 3166 } 3167 3168 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3169 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3170 Assembler::vptest(dst, src); 3171 } 3172 3173 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3174 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3175 Assembler::punpcklbw(dst, src); 3176 } 3177 3178 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3179 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3180 Assembler::pshufd(dst, src, mode); 3181 } 3182 3183 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3184 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3185 Assembler::pshuflw(dst, src, mode); 3186 } 3187 3188 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3189 assert(rscratch != noreg || always_reachable(src), "missing"); 3190 3191 if (reachable(src)) { 3192 vandpd(dst, nds, as_Address(src), vector_len); 3193 } else { 3194 lea(rscratch, src); 3195 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3196 } 3197 } 3198 3199 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3200 assert(rscratch != noreg || always_reachable(src), "missing"); 3201 3202 if (reachable(src)) { 3203 vandps(dst, nds, as_Address(src), vector_len); 
3204 } else { 3205 lea(rscratch, src); 3206 vandps(dst, nds, Address(rscratch, 0), vector_len); 3207 } 3208 } 3209 3210 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3211 bool merge, int vector_len, Register rscratch) { 3212 assert(rscratch != noreg || always_reachable(src), "missing"); 3213 3214 if (reachable(src)) { 3215 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3216 } else { 3217 lea(rscratch, src); 3218 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3219 } 3220 } 3221 3222 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3223 assert(rscratch != noreg || always_reachable(src), "missing"); 3224 3225 if (reachable(src)) { 3226 vdivsd(dst, nds, as_Address(src)); 3227 } else { 3228 lea(rscratch, src); 3229 vdivsd(dst, nds, Address(rscratch, 0)); 3230 } 3231 } 3232 3233 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3234 assert(rscratch != noreg || always_reachable(src), "missing"); 3235 3236 if (reachable(src)) { 3237 vdivss(dst, nds, as_Address(src)); 3238 } else { 3239 lea(rscratch, src); 3240 vdivss(dst, nds, Address(rscratch, 0)); 3241 } 3242 } 3243 3244 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3245 assert(rscratch != noreg || always_reachable(src), "missing"); 3246 3247 if (reachable(src)) { 3248 vmulsd(dst, nds, as_Address(src)); 3249 } else { 3250 lea(rscratch, src); 3251 vmulsd(dst, nds, Address(rscratch, 0)); 3252 } 3253 } 3254 3255 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3256 assert(rscratch != noreg || always_reachable(src), "missing"); 3257 3258 if (reachable(src)) { 3259 vmulss(dst, nds, as_Address(src)); 3260 } else { 3261 lea(rscratch, src); 3262 vmulss(dst, nds, Address(rscratch, 0)); 3263 } 3264 } 3265 3266 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3267 assert(rscratch != noreg || always_reachable(src), "missing"); 3268 3269 if (reachable(src)) { 3270 vsubsd(dst, nds, as_Address(src)); 3271 } else { 3272 lea(rscratch, src); 3273 vsubsd(dst, nds, Address(rscratch, 0)); 3274 } 3275 } 3276 3277 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3278 assert(rscratch != noreg || always_reachable(src), "missing"); 3279 3280 if (reachable(src)) { 3281 vsubss(dst, nds, as_Address(src)); 3282 } else { 3283 lea(rscratch, src); 3284 vsubss(dst, nds, Address(rscratch, 0)); 3285 } 3286 } 3287 3288 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3289 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3290 assert(rscratch != noreg || always_reachable(src), "missing"); 3291 3292 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 3293 } 3294 3295 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3296 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3297 assert(rscratch != noreg || always_reachable(src), "missing"); 3298 3299 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 3300 } 3301 3302 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
int vector_len, Register rscratch) { 3303 assert(rscratch != noreg || always_reachable(src), "missing"); 3304 3305 if (reachable(src)) { 3306 vxorpd(dst, nds, as_Address(src), vector_len); 3307 } else { 3308 lea(rscratch, src); 3309 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 3310 } 3311 } 3312 3313 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3314 assert(rscratch != noreg || always_reachable(src), "missing"); 3315 3316 if (reachable(src)) { 3317 vxorps(dst, nds, as_Address(src), vector_len); 3318 } else { 3319 lea(rscratch, src); 3320 vxorps(dst, nds, Address(rscratch, 0), vector_len); 3321 } 3322 } 3323 3324 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3325 assert(rscratch != noreg || always_reachable(src), "missing"); 3326 3327 if (UseAVX > 1 || (vector_len < 1)) { 3328 if (reachable(src)) { 3329 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 3330 } else { 3331 lea(rscratch, src); 3332 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 3333 } 3334 } else { 3335 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 3336 } 3337 } 3338 3339 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3340 assert(rscratch != noreg || always_reachable(src), "missing"); 3341 3342 if (reachable(src)) { 3343 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 3344 } else { 3345 lea(rscratch, src); 3346 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 3347 } 3348 } 3349 3350 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 3351 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 3352 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 3353 // The inverted mask is sign-extended 3354 andptr(possibly_non_local, inverted_mask); 3355 } 3356 3357 void MacroAssembler::resolve_jobject(Register value, 3358 Register tmp) { 3359 Register thread = r15_thread; 3360 assert_different_registers(value, thread, tmp); 3361 Label done, tagged, weak_tagged; 3362 testptr(value, value); 3363 jcc(Assembler::zero, done); // Use null as-is. 3364 testptr(value, JNIHandles::tag_mask); // Test for tag. 3365 jcc(Assembler::notZero, tagged); 3366 3367 // Resolve local handle 3368 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp); 3369 verify_oop(value); 3370 jmp(done); 3371 3372 bind(tagged); 3373 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 3374 jcc(Assembler::notZero, weak_tagged); 3375 3376 // Resolve global handle 3377 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3378 verify_oop(value); 3379 jmp(done); 3380 3381 bind(weak_tagged); 3382 // Resolve jweak. 3383 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3384 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp); 3385 verify_oop(value); 3386 3387 bind(done); 3388 } 3389 3390 void MacroAssembler::resolve_global_jobject(Register value, 3391 Register tmp) { 3392 Register thread = r15_thread; 3393 assert_different_registers(value, thread, tmp); 3394 Label done; 3395 3396 testptr(value, value); 3397 jcc(Assembler::zero, done); // Use null as-is. 3398 3399 #ifdef ASSERT 3400 { 3401 Label valid_global_tag; 3402 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
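    // Only handles carrying the global tag bit may be passed here; anything
    // else falls through to the stop() below.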
3403 jcc(Assembler::notZero, valid_global_tag); 3404 stop("non global jobject using resolve_global_jobject"); 3405 bind(valid_global_tag); 3406 } 3407 #endif 3408 3409 // Resolve global handle 3410 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3411 verify_oop(value); 3412 3413 bind(done); 3414 } 3415 3416 void MacroAssembler::subptr(Register dst, int32_t imm32) { 3417 subq(dst, imm32); 3418 } 3419 3420 // Force generation of a 4 byte immediate value even if it fits into 8bit 3421 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 3422 subq_imm32(dst, imm32); 3423 } 3424 3425 void MacroAssembler::subptr(Register dst, Register src) { 3426 subq(dst, src); 3427 } 3428 3429 // C++ bool manipulation 3430 void MacroAssembler::testbool(Register dst) { 3431 if(sizeof(bool) == 1) 3432 testb(dst, 0xff); 3433 else if(sizeof(bool) == 2) { 3434 // testw implementation needed for two byte bools 3435 ShouldNotReachHere(); 3436 } else if(sizeof(bool) == 4) 3437 testl(dst, dst); 3438 else 3439 // unsupported 3440 ShouldNotReachHere(); 3441 } 3442 3443 void MacroAssembler::testptr(Register dst, Register src) { 3444 testq(dst, src); 3445 } 3446 3447 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3448 void MacroAssembler::tlab_allocate(Register obj, 3449 Register var_size_in_bytes, 3450 int con_size_in_bytes, 3451 Register t1, 3452 Register t2, 3453 Label& slow_case) { 3454 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3455 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 3456 } 3457 3458 RegSet MacroAssembler::call_clobbered_gp_registers() { 3459 RegSet regs; 3460 regs += RegSet::of(rax, rcx, rdx); 3461 #ifndef _WINDOWS 3462 regs += RegSet::of(rsi, rdi); 3463 #endif 3464 regs += RegSet::range(r8, r11); 3465 if (UseAPX) { 3466 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1)); 3467 } 3468 return regs; 3469 } 3470 3471 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 3472 int num_xmm_registers = XMMRegister::available_xmm_registers(); 3473 #if defined(_WINDOWS) 3474 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 3475 if (num_xmm_registers > 16) { 3476 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 3477 } 3478 return result; 3479 #else 3480 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 3481 #endif 3482 } 3483 3484 // C1 only ever uses the first double/float of the XMM register. 3485 static int xmm_save_size() { return sizeof(double); } 3486 3487 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3488 masm->movdbl(Address(rsp, offset), reg); 3489 } 3490 3491 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3492 masm->movdbl(reg, Address(rsp, offset)); 3493 } 3494 3495 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, 3496 bool save_fpu, int& gp_area_size, int& xmm_area_size) { 3497 3498 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 3499 StackAlignmentInBytes); 3500 xmm_area_size = save_fpu ? 
xmm_registers.size() * xmm_save_size() : 0; 3501 3502 return gp_area_size + xmm_area_size; 3503 } 3504 3505 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 3506 block_comment("push_call_clobbered_registers start"); 3507 // Regular registers 3508 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 3509 3510 int gp_area_size; 3511 int xmm_area_size; 3512 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 3513 gp_area_size, xmm_area_size); 3514 subptr(rsp, total_save_size); 3515 3516 push_set(gp_registers_to_push, 0); 3517 3518 if (save_fpu) { 3519 push_set(call_clobbered_xmm_registers(), gp_area_size); 3520 } 3521 3522 block_comment("push_call_clobbered_registers end"); 3523 } 3524 3525 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 3526 block_comment("pop_call_clobbered_registers start"); 3527 3528 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 3529 3530 int gp_area_size; 3531 int xmm_area_size; 3532 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 3533 gp_area_size, xmm_area_size); 3534 3535 if (restore_fpu) { 3536 pop_set(call_clobbered_xmm_registers(), gp_area_size); 3537 } 3538 3539 pop_set(gp_registers_to_pop, 0); 3540 3541 addptr(rsp, total_save_size); 3542 3543 vzeroupper(); 3544 3545 block_comment("pop_call_clobbered_registers end"); 3546 } 3547 3548 void MacroAssembler::push_set(XMMRegSet set, int offset) { 3549 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 3550 int spill_offset = offset; 3551 3552 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 3553 save_xmm_register(this, spill_offset, *it); 3554 spill_offset += xmm_save_size(); 3555 } 3556 } 3557 3558 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 3559 int restore_size = set.size() * xmm_save_size(); 3560 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 3561 3562 int restore_offset = offset + restore_size - xmm_save_size(); 3563 3564 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 3565 restore_xmm_register(this, restore_offset, *it); 3566 restore_offset -= xmm_save_size(); 3567 } 3568 } 3569 3570 void MacroAssembler::push_set(RegSet set, int offset) { 3571 int spill_offset; 3572 if (offset == -1) { 3573 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3574 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 3575 subptr(rsp, aligned_size); 3576 spill_offset = 0; 3577 } else { 3578 spill_offset = offset; 3579 } 3580 3581 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 3582 movptr(Address(rsp, spill_offset), *it); 3583 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3584 } 3585 } 3586 3587 void MacroAssembler::pop_set(RegSet set, int offset) { 3588 3589 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3590 int restore_size = set.size() * gp_reg_size; 3591 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 3592 3593 int restore_offset; 3594 if (offset == -1) { 3595 restore_offset = restore_size - gp_reg_size; 3596 } else { 3597 restore_offset = offset + restore_size - gp_reg_size; 3598 } 3599 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 3600 movptr(*it, 
Address(rsp, restore_offset)); 3601 restore_offset -= gp_reg_size; 3602 } 3603 3604 if (offset == -1) { 3605 addptr(rsp, aligned_size); 3606 } 3607 } 3608 3609 // Preserves the contents of address, destroys the contents length_in_bytes and temp. 3610 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 3611 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 3612 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 3613 Label done; 3614 3615 testptr(length_in_bytes, length_in_bytes); 3616 jcc(Assembler::zero, done); 3617 3618 // initialize topmost word, divide index by 2, check if odd and test if zero 3619 // note: for the remaining code to work, index must be a multiple of BytesPerWord 3620 #ifdef ASSERT 3621 { 3622 Label L; 3623 testptr(length_in_bytes, BytesPerWord - 1); 3624 jcc(Assembler::zero, L); 3625 stop("length must be a multiple of BytesPerWord"); 3626 bind(L); 3627 } 3628 #endif 3629 Register index = length_in_bytes; 3630 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 3631 if (UseIncDec) { 3632 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 3633 } else { 3634 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 3635 shrptr(index, 1); 3636 } 3637 3638 // initialize remaining object fields: index is a multiple of 2 now 3639 { 3640 Label loop; 3641 bind(loop); 3642 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 3643 decrement(index); 3644 jcc(Assembler::notZero, loop); 3645 } 3646 3647 bind(done); 3648 } 3649 3650 // Look up the method for a megamorphic invokeinterface call. 3651 // The target method is determined by <intf_klass, itable_index>. 3652 // The receiver klass is in recv_klass. 3653 // On success, the result will be in method_result, and execution falls through. 3654 // On failure, execution transfers to the given label. 3655 void MacroAssembler::lookup_interface_method(Register recv_klass, 3656 Register intf_klass, 3657 RegisterOrConstant itable_index, 3658 Register method_result, 3659 Register scan_temp, 3660 Label& L_no_such_interface, 3661 bool return_method) { 3662 assert_different_registers(recv_klass, intf_klass, scan_temp); 3663 assert_different_registers(method_result, intf_klass, scan_temp); 3664 assert(recv_klass != method_result || !return_method, 3665 "recv_klass can be destroyed when method isn't needed"); 3666 3667 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 3668 "caller must use same register for non-constant itable index as for method"); 3669 3670 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 3671 int vtable_base = in_bytes(Klass::vtable_start_offset()); 3672 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 3673 int scan_step = itableOffsetEntry::size() * wordSize; 3674 int vte_size = vtableEntry::size_in_bytes(); 3675 Address::ScaleFactor times_vte_scale = Address::times_ptr; 3676 assert(vte_size == wordSize, "else adjust times_vte_scale"); 3677 3678 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 3679 3680 // Could store the aligned, prescaled offset in the klass. 3681 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 3682 3683 if (return_method) { 3684 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
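    // itableMethodEntry is one word (asserted below), so scaling itable_index
    // by times_ptr and folding in itentry_off leaves recv_klass pointing at
    // the method slot once the matching interface's itable offset is added.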
3685 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3686 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 3687 } 3688 3689 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 3690 // if (scan->interface() == intf) { 3691 // result = (klass + scan->offset() + itable_index); 3692 // } 3693 // } 3694 Label search, found_method; 3695 3696 for (int peel = 1; peel >= 0; peel--) { 3697 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 3698 cmpptr(intf_klass, method_result); 3699 3700 if (peel) { 3701 jccb(Assembler::equal, found_method); 3702 } else { 3703 jccb(Assembler::notEqual, search); 3704 // (invert the test to fall through to found_method...) 3705 } 3706 3707 if (!peel) break; 3708 3709 bind(search); 3710 3711 // Check that the previous entry is non-null. A null entry means that 3712 // the receiver class doesn't implement the interface, and wasn't the 3713 // same as when the caller was compiled. 3714 testptr(method_result, method_result); 3715 jcc(Assembler::zero, L_no_such_interface); 3716 addptr(scan_temp, scan_step); 3717 } 3718 3719 bind(found_method); 3720 3721 if (return_method) { 3722 // Got a hit. 3723 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 3724 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 3725 } 3726 } 3727 3728 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 3729 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 3730 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 3731 // The target method is determined by <holder_klass, itable_index>. 3732 // The receiver klass is in recv_klass. 3733 // On success, the result will be in method_result, and execution falls through. 3734 // On failure, execution transfers to the given label. 3735 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 3736 Register holder_klass, 3737 Register resolved_klass, 3738 Register method_result, 3739 Register scan_temp, 3740 Register temp_reg2, 3741 Register receiver, 3742 int itable_index, 3743 Label& L_no_such_interface) { 3744 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 3745 Register temp_itbl_klass = method_result; 3746 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 3747 3748 int vtable_base = in_bytes(Klass::vtable_start_offset()); 3749 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 3750 int scan_step = itableOffsetEntry::size() * wordSize; 3751 int vte_size = vtableEntry::size_in_bytes(); 3752 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 3753 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 3754 Address::ScaleFactor times_vte_scale = Address::times_ptr; 3755 assert(vte_size == wordSize, "adjust times_vte_scale"); 3756 3757 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 3758 3759 // temp_itbl_klass = recv_klass.itable[0] 3760 // scan_temp = &recv_klass.itable[0] + step 3761 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 3762 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 3763 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 3764 xorptr(temp_reg, temp_reg); 3765 3766 // Initial checks: 3767 // - if (holder_klass != resolved_klass), go to "scan for resolved" 3768 // - if (itable[0] == 0), no such interface 3769 // - if (itable[0] == holder_klass), shortcut to "holder found" 3770 cmpptr(holder_klass, resolved_klass); 3771 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 3772 testptr(temp_itbl_klass, temp_itbl_klass); 3773 jccb(Assembler::zero, L_no_such_interface); 3774 cmpptr(holder_klass, temp_itbl_klass); 3775 jccb(Assembler::equal, L_holder_found); 3776 3777 // Loop: Look for holder_klass record in itable 3778 // do { 3779 // tmp = itable[index]; 3780 // index += step; 3781 // if (tmp == holder_klass) { 3782 // goto L_holder_found; // Found! 3783 // } 3784 // } while (tmp != 0); 3785 // goto L_no_such_interface // Not found. 3786 Label L_scan_holder; 3787 bind(L_scan_holder); 3788 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3789 addptr(scan_temp, scan_step); 3790 cmpptr(holder_klass, temp_itbl_klass); 3791 jccb(Assembler::equal, L_holder_found); 3792 testptr(temp_itbl_klass, temp_itbl_klass); 3793 jccb(Assembler::notZero, L_scan_holder); 3794 3795 jmpb(L_no_such_interface); 3796 3797 // Loop: Look for resolved_class record in itable 3798 // do { 3799 // tmp = itable[index]; 3800 // index += step; 3801 // if (tmp == holder_klass) { 3802 // // Also check if we have met a holder klass 3803 // holder_tmp = itable[index-step-ioffset]; 3804 // } 3805 // if (tmp == resolved_klass) { 3806 // goto L_resolved_found; // Found! 3807 // } 3808 // } while (tmp != 0); 3809 // goto L_no_such_interface // Not found. 3810 // 3811 Label L_loop_scan_resolved; 3812 bind(L_loop_scan_resolved); 3813 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3814 addptr(scan_temp, scan_step); 3815 bind(L_loop_scan_resolved_entry); 3816 cmpptr(holder_klass, temp_itbl_klass); 3817 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3818 cmpptr(resolved_klass, temp_itbl_klass); 3819 jccb(Assembler::equal, L_resolved_found); 3820 testptr(temp_itbl_klass, temp_itbl_klass); 3821 jccb(Assembler::notZero, L_loop_scan_resolved); 3822 3823 jmpb(L_no_such_interface); 3824 3825 Label L_ready; 3826 3827 // See if we already have a holder klass. If not, go and scan for it. 
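  // At L_resolved_found, temp_reg is either still zero (no holder entry seen
  // during the resolved scan) or holds the holder's itable offset captured by
  // the cmovl above; zero means we must still scan for the holder.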
3828 bind(L_resolved_found); 3829 testptr(temp_reg, temp_reg); 3830 jccb(Assembler::zero, L_scan_holder); 3831 jmpb(L_ready); 3832 3833 bind(L_holder_found); 3834 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3835 3836 // Finally, temp_reg contains holder_klass vtable offset 3837 bind(L_ready); 3838 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3839 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 3840 load_klass(scan_temp, receiver, noreg); 3841 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3842 } else { 3843 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3844 } 3845 } 3846 3847 3848 // virtual method calling 3849 void MacroAssembler::lookup_virtual_method(Register recv_klass, 3850 RegisterOrConstant vtable_index, 3851 Register method_result) { 3852 const ByteSize base = Klass::vtable_start_offset(); 3853 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 3854 Address vtable_entry_addr(recv_klass, 3855 vtable_index, Address::times_ptr, 3856 base + vtableEntry::method_offset()); 3857 movptr(method_result, vtable_entry_addr); 3858 } 3859 3860 3861 void MacroAssembler::check_klass_subtype(Register sub_klass, 3862 Register super_klass, 3863 Register temp_reg, 3864 Label& L_success) { 3865 Label L_failure; 3866 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 3867 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 3868 bind(L_failure); 3869 } 3870 3871 3872 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 3873 Register super_klass, 3874 Register temp_reg, 3875 Label* L_success, 3876 Label* L_failure, 3877 Label* L_slow_path, 3878 RegisterOrConstant super_check_offset) { 3879 assert_different_registers(sub_klass, super_klass, temp_reg); 3880 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 3881 if (super_check_offset.is_register()) { 3882 assert_different_registers(sub_klass, super_klass, 3883 super_check_offset.as_register()); 3884 } else if (must_load_sco) { 3885 assert(temp_reg != noreg, "supply either a temp or a register offset"); 3886 } 3887 3888 Label L_fallthrough; 3889 int label_nulls = 0; 3890 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3891 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3892 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 3893 assert(label_nulls <= 1, "at most one null in the batch"); 3894 3895 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3896 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 3897 Address super_check_offset_addr(super_klass, sco_offset); 3898 3899 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 3900 // range of a jccb. If this routine grows larger, reconsider at 3901 // least some of these. 3902 #define local_jcc(assembler_cond, label) \ 3903 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 3904 else jcc( assembler_cond, label) /*omit semi*/ 3905 3906 // Hacked jmp, which may only be used just before L_fallthrough. 
3907 #define final_jmp(label) \ 3908 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3909 else jmp(label) /*omit semi*/ 3910 3911 // If the pointers are equal, we are done (e.g., String[] elements). 3912 // This self-check enables sharing of secondary supertype arrays among 3913 // non-primary types such as array-of-interface. Otherwise, each such 3914 // type would need its own customized SSA. 3915 // We move this check to the front of the fast path because many 3916 // type checks are in fact trivially successful in this manner, 3917 // so we get a nicely predicted branch right at the start of the check. 3918 cmpptr(sub_klass, super_klass); 3919 local_jcc(Assembler::equal, *L_success); 3920 3921 // Check the supertype display: 3922 if (must_load_sco) { 3923 // Positive movl does right thing on LP64. 3924 movl(temp_reg, super_check_offset_addr); 3925 super_check_offset = RegisterOrConstant(temp_reg); 3926 } 3927 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 3928 cmpptr(super_klass, super_check_addr); // load displayed supertype 3929 3930 // This check has worked decisively for primary supers. 3931 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3932 // (Secondary supers are interfaces and very deeply nested subtypes.) 3933 // This works in the same check above because of a tricky aliasing 3934 // between the super_cache and the primary super display elements. 3935 // (The 'super_check_addr' can address either, as the case requires.) 3936 // Note that the cache is updated below if it does not help us find 3937 // what we need immediately. 3938 // So if it was a primary super, we can just fail immediately. 3939 // Otherwise, it's the slow path for us (no success at this point). 3940 3941 if (super_check_offset.is_register()) { 3942 local_jcc(Assembler::equal, *L_success); 3943 cmpl(super_check_offset.as_register(), sc_offset); 3944 if (L_failure == &L_fallthrough) { 3945 local_jcc(Assembler::equal, *L_slow_path); 3946 } else { 3947 local_jcc(Assembler::notEqual, *L_failure); 3948 final_jmp(*L_slow_path); 3949 } 3950 } else if (super_check_offset.as_constant() == sc_offset) { 3951 // Need a slow path; fast failure is impossible. 3952 if (L_slow_path == &L_fallthrough) { 3953 local_jcc(Assembler::equal, *L_success); 3954 } else { 3955 local_jcc(Assembler::notEqual, *L_slow_path); 3956 final_jmp(*L_success); 3957 } 3958 } else { 3959 // No slow path; it's a fast decision. 
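  // Here super_check_offset is a constant other than sc_offset, i.e. the
  // super is a primary supertype: the single displayed-supertype compare
  // above fully decides the outcome.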
3960 if (L_failure == &L_fallthrough) { 3961 local_jcc(Assembler::equal, *L_success); 3962 } else { 3963 local_jcc(Assembler::notEqual, *L_failure); 3964 final_jmp(*L_success); 3965 } 3966 } 3967 3968 bind(L_fallthrough); 3969 3970 #undef local_jcc 3971 #undef final_jmp 3972 } 3973 3974 3975 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass, 3976 Register super_klass, 3977 Register temp_reg, 3978 Register temp2_reg, 3979 Label* L_success, 3980 Label* L_failure, 3981 bool set_cond_codes) { 3982 assert_different_registers(sub_klass, super_klass, temp_reg); 3983 if (temp2_reg != noreg) 3984 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 3985 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 3986 3987 Label L_fallthrough; 3988 int label_nulls = 0; 3989 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3990 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3991 assert(label_nulls <= 1, "at most one null in the batch"); 3992 3993 // a couple of useful fields in sub_klass: 3994 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 3995 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3996 Address secondary_supers_addr(sub_klass, ss_offset); 3997 Address super_cache_addr( sub_klass, sc_offset); 3998 3999 // Do a linear scan of the secondary super-klass chain. 4000 // This code is rarely used, so simplicity is a virtue here. 4001 // The repne_scan instruction uses fixed registers, which we must spill. 4002 // Don't worry too much about pre-existing connections with the input regs. 4003 4004 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4005 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4006 4007 // Get super_klass value into rax (even if it was in rdi or rcx). 4008 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4009 if (super_klass != rax) { 4010 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4011 mov(rax, super_klass); 4012 } 4013 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4014 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4015 4016 #ifndef PRODUCT 4017 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4018 ExternalAddress pst_counter_addr((address) pst_counter); 4019 lea(rcx, pst_counter_addr); 4020 incrementl(Address(rcx, 0)); 4021 #endif //PRODUCT 4022 4023 // We will consult the secondary-super array. 4024 movptr(rdi, secondary_supers_addr); 4025 // Load the array length. (Positive movl does right thing on LP64.) 4026 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4027 // Skip to start of data. 4028 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 4029 4030 // Scan RCX words at [RDI] for an occurrence of RAX. 4031 // Set NZ/Z based on last compare. 4032 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 4033 // not change flags (only scas instruction which is repeated sets flags). 4034 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 4035 4036 testptr(rax,rax); // Set Z = 0 4037 repne_scan(); 4038 4039 // Unspill the temp. registers: 4040 if (pushed_rdi) pop(rdi); 4041 if (pushed_rcx) pop(rcx); 4042 if (pushed_rax) pop(rax); 4043 4044 if (set_cond_codes) { 4045 // Special hack for the AD files: rdi is guaranteed non-zero. 4046 assert(!pushed_rdi, "rdi must be left non-null"); 4047 // Also, the condition codes are properly set Z/NZ on succeed/failure. 
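    // (After repne_scan, rdi points just past the last secondary-supers entry
    //  examined, so it can never be zero here.)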
4048 } 4049 4050 if (L_failure == &L_fallthrough) 4051 jccb(Assembler::notEqual, *L_failure); 4052 else jcc(Assembler::notEqual, *L_failure); 4053 4054 // Success. Cache the super we found and proceed in triumph. 4055 movptr(super_cache_addr, super_klass); 4056 4057 if (L_success != &L_fallthrough) { 4058 jmp(*L_success); 4059 } 4060 4061 #undef IS_A_TEMP 4062 4063 bind(L_fallthrough); 4064 } 4065 4066 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4067 Register super_klass, 4068 Register temp_reg, 4069 Register temp2_reg, 4070 Label* L_success, 4071 Label* L_failure, 4072 bool set_cond_codes) { 4073 assert(set_cond_codes == false, "must be false on 64-bit x86"); 4074 check_klass_subtype_slow_path 4075 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg, 4076 L_success, L_failure); 4077 } 4078 4079 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4080 Register super_klass, 4081 Register temp_reg, 4082 Register temp2_reg, 4083 Register temp3_reg, 4084 Register temp4_reg, 4085 Label* L_success, 4086 Label* L_failure) { 4087 if (UseSecondarySupersTable) { 4088 check_klass_subtype_slow_path_table 4089 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg, 4090 L_success, L_failure); 4091 } else { 4092 check_klass_subtype_slow_path_linear 4093 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false); 4094 } 4095 } 4096 4097 Register MacroAssembler::allocate_if_noreg(Register r, 4098 RegSetIterator<Register> &available_regs, 4099 RegSet ®s_to_push) { 4100 if (!r->is_valid()) { 4101 r = *available_regs++; 4102 regs_to_push += r; 4103 } 4104 return r; 4105 } 4106 4107 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 4108 Register super_klass, 4109 Register temp_reg, 4110 Register temp2_reg, 4111 Register temp3_reg, 4112 Register result_reg, 4113 Label* L_success, 4114 Label* L_failure) { 4115 // NB! Callers may assume that, when temp2_reg is a valid register, 4116 // this code sets it to a nonzero value. 
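  // Plan: allocate any temps the caller did not supply from the killable set,
  // spill whatever we had to allocate, run the variable-slot table lookup,
  // then turn its 0 / nonzero result into flags for the branches below.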
4117 bool temp2_reg_was_valid = temp2_reg->is_valid(); 4118 4119 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 4120 4121 Label L_fallthrough; 4122 int label_nulls = 0; 4123 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4124 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4125 assert(label_nulls <= 1, "at most one null in the batch"); 4126 4127 BLOCK_COMMENT("check_klass_subtype_slow_path_table"); 4128 4129 RegSetIterator<Register> available_regs 4130 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin(); 4131 4132 RegSet pushed_regs; 4133 4134 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 4135 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 4136 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 4137 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 4138 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs); 4139 4140 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg); 4141 4142 { 4143 4144 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4145 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4146 subptr(rsp, aligned_size); 4147 push_set(pushed_regs, 0); 4148 4149 lookup_secondary_supers_table_var(sub_klass, 4150 super_klass, 4151 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg); 4152 cmpq(result_reg, 0); 4153 4154 // Unspill the temp. registers: 4155 pop_set(pushed_regs, 0); 4156 // Increment SP but do not clobber flags. 4157 lea(rsp, Address(rsp, aligned_size)); 4158 } 4159 4160 if (temp2_reg_was_valid) { 4161 movq(temp2_reg, 1); 4162 } 4163 4164 jcc(Assembler::notEqual, *L_failure); 4165 4166 if (L_success != &L_fallthrough) { 4167 jmp(*L_success); 4168 } 4169 4170 bind(L_fallthrough); 4171 } 4172 4173 // population_count variant for running without the POPCNT 4174 // instruction, which was introduced with SSE4.2 in 2008. 4175 void MacroAssembler::population_count(Register dst, Register src, 4176 Register scratch1, Register scratch2) { 4177 assert_different_registers(src, scratch1, scratch2); 4178 if (UsePopCountInstruction) { 4179 Assembler::popcntq(dst, src); 4180 } else { 4181 assert_different_registers(src, scratch1, scratch2); 4182 assert_different_registers(dst, scratch1, scratch2); 4183 Label loop, done; 4184 4185 mov(scratch1, src); 4186 // dst = 0; 4187 // while(scratch1 != 0) { 4188 // dst++; 4189 // scratch1 &= (scratch1 - 1); 4190 // } 4191 xorl(dst, dst); 4192 testq(scratch1, scratch1); 4193 jccb(Assembler::equal, done); 4194 { 4195 bind(loop); 4196 incq(dst); 4197 movq(scratch2, scratch1); 4198 decq(scratch2); 4199 andq(scratch1, scratch2); 4200 jccb(Assembler::notEqual, loop); 4201 } 4202 bind(done); 4203 } 4204 #ifdef ASSERT 4205 mov64(scratch1, 0xCafeBabeDeadBeef); 4206 movq(scratch2, scratch1); 4207 #endif 4208 } 4209 4210 // Ensure that the inline code and the stub are using the same registers. 
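// Both the inline fast path below and the slow-path stub installed in
// StubRoutines rely on this fixed assignment; the macro simply asserts it.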
4211 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 4212 do { \ 4213 assert(r_super_klass == rax, "mismatch"); \ 4214 assert(r_array_base == rbx, "mismatch"); \ 4215 assert(r_array_length == rcx, "mismatch"); \ 4216 assert(r_array_index == rdx, "mismatch"); \ 4217 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \ 4218 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \ 4219 assert(result == rdi || result == noreg, "mismatch"); \ 4220 } while(0) 4221 4222 // Versions of salq and rorq that don't need count to be in rcx 4223 4224 void MacroAssembler::salq(Register dest, Register count) { 4225 if (count == rcx) { 4226 Assembler::salq(dest); 4227 } else { 4228 assert_different_registers(rcx, dest); 4229 xchgq(rcx, count); 4230 Assembler::salq(dest); 4231 xchgq(rcx, count); 4232 } 4233 } 4234 4235 void MacroAssembler::rorq(Register dest, Register count) { 4236 if (count == rcx) { 4237 Assembler::rorq(dest); 4238 } else { 4239 assert_different_registers(rcx, dest); 4240 xchgq(rcx, count); 4241 Assembler::rorq(dest); 4242 xchgq(rcx, count); 4243 } 4244 } 4245 4246 // Return true: we succeeded in generating this code 4247 // 4248 // At runtime, return 0 in result if r_super_klass is a superclass of 4249 // r_sub_klass, otherwise return nonzero. Use this if you know the 4250 // super_klass_slot of the class you're looking for. This is always 4251 // the case for instanceof and checkcast. 4252 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 4253 Register r_super_klass, 4254 Register temp1, 4255 Register temp2, 4256 Register temp3, 4257 Register temp4, 4258 Register result, 4259 u1 super_klass_slot) { 4260 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 4261 4262 Label L_fallthrough, L_success, L_failure; 4263 4264 BLOCK_COMMENT("lookup_secondary_supers_table {"); 4265 4266 const Register 4267 r_array_index = temp1, 4268 r_array_length = temp2, 4269 r_array_base = temp3, 4270 r_bitmap = temp4; 4271 4272 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 4273 4274 xorq(result, result); // = 0 4275 4276 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 4277 movq(r_array_index, r_bitmap); 4278 4279 // First check the bitmap to see if super_klass might be present. If 4280 // the bit is zero, we are certain that super_klass is not one of 4281 // the secondary supers. 4282 u1 bit = super_klass_slot; 4283 { 4284 // NB: If the count in a x86 shift instruction is 0, the flags are 4285 // not affected, so we do a testq instead. 4286 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit; 4287 if (shift_count != 0) { 4288 salq(r_array_index, shift_count); 4289 } else { 4290 testq(r_array_index, r_array_index); 4291 } 4292 } 4293 // We test the MSB of r_array_index, i.e. its sign bit 4294 jcc(Assembler::positive, L_failure); 4295 4296 // Get the first array index that can contain super_klass into r_array_index. 4297 if (bit != 0) { 4298 population_count(r_array_index, r_array_index, temp2, temp3); 4299 } else { 4300 movl(r_array_index, 1); 4301 } 4302 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 4303 4304 // We will consult the secondary-super array. 4305 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4306 4307 // We're asserting that the first word in an Array<Klass*> is the 4308 // length, and the second word is the first word of the data. 
If 4309 // that ever changes, r_array_base will have to be adjusted here. 4310 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4311 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4312 4313 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4314 jccb(Assembler::equal, L_success); 4315 4316 // Is there another entry to check? Consult the bitmap. 4317 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 4318 jccb(Assembler::carryClear, L_failure); 4319 4320 // Linear probe. Rotate the bitmap so that the next bit to test is 4321 // in Bit 1. 4322 if (bit != 0) { 4323 rorq(r_bitmap, bit); 4324 } 4325 4326 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4327 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4328 // Kills: r_array_length. 4329 // Returns: result. 4330 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 4331 // Result (0/1) is in rdi 4332 jmpb(L_fallthrough); 4333 4334 bind(L_failure); 4335 incq(result); // 0 => 1 4336 4337 bind(L_success); 4338 // result = 0; 4339 4340 bind(L_fallthrough); 4341 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4342 4343 if (VerifySecondarySupers) { 4344 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4345 temp1, temp2, temp3); 4346 } 4347 } 4348 4349 // At runtime, return 0 in result if r_super_klass is a superclass of 4350 // r_sub_klass, otherwise return nonzero. Use this version of 4351 // lookup_secondary_supers_table() if you don't know ahead of time 4352 // which superclass will be searched for. Used by interpreter and 4353 // runtime stubs. It is larger and has somewhat greater latency than 4354 // the version above, which takes a constant super_klass_slot. 4355 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 4356 Register r_super_klass, 4357 Register temp1, 4358 Register temp2, 4359 Register temp3, 4360 Register temp4, 4361 Register result) { 4362 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 4363 assert_different_registers(r_sub_klass, r_super_klass, rcx); 4364 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4); 4365 4366 Label L_fallthrough, L_success, L_failure; 4367 4368 BLOCK_COMMENT("lookup_secondary_supers_table {"); 4369 4370 RegSetIterator<Register> available_regs = (temps - rcx).begin(); 4371 4372 // FIXME. Once we are sure that all paths reaching this point really 4373 // do pass rcx as one of our temps we can get rid of the following 4374 // workaround. 4375 assert(temps.contains(rcx), "fix this code"); 4376 4377 // We prefer to have our shift count in rcx. If rcx is one of our 4378 // temps, use it for slot. If not, pick any of our temps. 4379 Register slot; 4380 if (!temps.contains(rcx)) { 4381 slot = *available_regs++; 4382 } else { 4383 slot = rcx; 4384 } 4385 4386 const Register r_array_index = *available_regs++; 4387 const Register r_bitmap = *available_regs++; 4388 4389 // The logic above guarantees this property, but we state it here. 4390 assert_different_registers(r_array_index, r_bitmap, rcx); 4391 4392 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 4393 movq(r_array_index, r_bitmap); 4394 4395 // First check the bitmap to see if super_klass might be present. If 4396 // the bit is zero, we are certain that super_klass is not one of 4397 // the secondary supers. 
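  // Worked example: if the super's hash slot is 5, then slot ^ 63 == 58 and
  // shifting the bitmap left by 58 moves bit 5 into the sign bit, so a clear
  // sign bit means the super cannot be a secondary super. The popcount of the
  // shifted value counts the set bits originally at positions 0..5, which is
  // the (1-based) index of the candidate entry in the packed array.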
4398 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4399 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64) 4400 salq(r_array_index, slot); 4401 4402 testq(r_array_index, r_array_index); 4403 // We test the MSB of r_array_index, i.e. its sign bit 4404 jcc(Assembler::positive, L_failure); 4405 4406 const Register r_array_base = *available_regs++; 4407 4408 // Get the first array index that can contain super_klass into r_array_index. 4409 // Note: Clobbers r_array_base and slot. 4410 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot); 4411 4412 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 4413 4414 // We will consult the secondary-super array. 4415 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4416 4417 // We're asserting that the first word in an Array<Klass*> is the 4418 // length, and the second word is the first word of the data. If 4419 // that ever changes, r_array_base will have to be adjusted here. 4420 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4421 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4422 4423 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4424 jccb(Assembler::equal, L_success); 4425 4426 // Restore slot to its true value 4427 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4428 4429 // Linear probe. Rotate the bitmap so that the next bit to test is 4430 // in Bit 1. 4431 rorq(r_bitmap, slot); 4432 4433 // Is there another entry to check? Consult the bitmap. 4434 btq(r_bitmap, 1); 4435 jccb(Assembler::carryClear, L_failure); 4436 4437 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4438 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4439 // Kills: r_array_length. 4440 // Returns: result. 4441 lookup_secondary_supers_table_slow_path(r_super_klass, 4442 r_array_base, 4443 r_array_index, 4444 r_bitmap, 4445 /*temp1*/result, 4446 /*temp2*/slot, 4447 &L_success, 4448 nullptr); 4449 4450 bind(L_failure); 4451 movq(result, 1); 4452 jmpb(L_fallthrough); 4453 4454 bind(L_success); 4455 xorq(result, result); // = 0 4456 4457 bind(L_fallthrough); 4458 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4459 4460 if (VerifySecondarySupers) { 4461 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4462 temp1, temp2, temp3); 4463 } 4464 } 4465 4466 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit, 4467 Label* L_success, Label* L_failure) { 4468 Label L_loop, L_fallthrough; 4469 { 4470 int label_nulls = 0; 4471 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4472 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4473 assert(label_nulls <= 1, "at most one null in the batch"); 4474 } 4475 bind(L_loop); 4476 cmpq(value, Address(addr, count, Address::times_8)); 4477 jcc(Assembler::equal, *L_success); 4478 addl(count, 1); 4479 cmpl(count, limit); 4480 jcc(Assembler::less, L_loop); 4481 4482 if (&L_fallthrough != L_failure) { 4483 jmp(*L_failure); 4484 } 4485 bind(L_fallthrough); 4486 } 4487 4488 // Called by code generated by check_klass_subtype_slow_path 4489 // above. This is called when there is a collision in the hashed 4490 // lookup in the secondary supers array. 
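// On entry, r_array_index addresses the first slot not yet inspected and bits
// 0 and 1 of r_bitmap have already been checked by the caller. For bitmaps
// that are not (nearly) full this is a conventional wrapping linear probe; a
// full bitmap falls back to scanning the whole array.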
4491 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 4492 Register r_array_base, 4493 Register r_array_index, 4494 Register r_bitmap, 4495 Register temp1, 4496 Register temp2, 4497 Label* L_success, 4498 Label* L_failure) { 4499 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 4500 4501 const Register 4502 r_array_length = temp1, 4503 r_sub_klass = noreg, 4504 result = noreg; 4505 4506 Label L_fallthrough; 4507 int label_nulls = 0; 4508 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4509 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4510 assert(label_nulls <= 1, "at most one null in the batch"); 4511 4512 // Load the array length. 4513 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4514 // And adjust the array base to point to the data. 4515 // NB! Effectively increments current slot index by 1. 4516 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 4517 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4518 4519 // Linear probe 4520 Label L_huge; 4521 4522 // The bitmap is full to bursting. 4523 // Implicit invariant: BITMAP_FULL implies (length > 0) 4524 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 4525 jcc(Assembler::greater, L_huge); 4526 4527 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 4528 // current slot (at secondary_supers[r_array_index]) has not yet 4529 // been inspected, and r_array_index may be out of bounds if we 4530 // wrapped around the end of the array. 4531 4532 { // This is conventional linear probing, but instead of terminating 4533 // when a null entry is found in the table, we maintain a bitmap 4534 // in which a 0 indicates missing entries. 4535 // The check above guarantees there are 0s in the bitmap, so the loop 4536 // eventually terminates. 4537 4538 xorl(temp2, temp2); // = 0; 4539 4540 Label L_again; 4541 bind(L_again); 4542 4543 // Check for array wraparound. 4544 cmpl(r_array_index, r_array_length); 4545 cmovl(Assembler::greaterEqual, r_array_index, temp2); 4546 4547 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4548 jcc(Assembler::equal, *L_success); 4549 4550 // If the next bit in bitmap is zero, we're done. 4551 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 4552 jcc(Assembler::carryClear, *L_failure); 4553 4554 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 4555 addl(r_array_index, 1); 4556 4557 jmp(L_again); 4558 } 4559 4560 { // Degenerate case: more than 64 secondary supers. 4561 // FIXME: We could do something smarter here, maybe a vectorized 4562 // comparison or a binary search, but is that worth any added 4563 // complexity? 4564 bind(L_huge); 4565 xorl(r_array_index, r_array_index); // = 0 4566 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 4567 L_success, 4568 (&L_fallthrough != L_failure ? L_failure : nullptr)); 4569 4570 bind(L_fallthrough); 4571 } 4572 } 4573 4574 struct VerifyHelperArguments { 4575 Klass* _super; 4576 Klass* _sub; 4577 intptr_t _linear_result; 4578 intptr_t _table_result; 4579 }; 4580 4581 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 4582 Klass::on_secondary_supers_verification_failure(args->_super, 4583 args->_sub, 4584 args->_linear_result, 4585 args->_table_result, 4586 msg); 4587 } 4588 4589 // Make sure that the hashed lookup and a linear scan agree. 
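// Only used under VerifySecondarySupers: redo the lookup as a plain
// repne_scanq over the secondary-supers array and report any disagreement
// with 'result' by calling into the VM.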
4590 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 4591 Register r_super_klass, 4592 Register result, 4593 Register temp1, 4594 Register temp2, 4595 Register temp3) { 4596 const Register 4597 r_array_index = temp1, 4598 r_array_length = temp2, 4599 r_array_base = temp3, 4600 r_bitmap = noreg; 4601 4602 BLOCK_COMMENT("verify_secondary_supers_table {"); 4603 4604 Label L_success, L_failure, L_check, L_done; 4605 4606 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4607 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4608 // And adjust the array base to point to the data. 4609 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4610 4611 testl(r_array_length, r_array_length); // array_length == 0? 4612 jcc(Assembler::zero, L_failure); 4613 4614 movl(r_array_index, 0); 4615 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 4616 // fall through to L_failure 4617 4618 const Register linear_result = r_array_index; // reuse temp1 4619 4620 bind(L_failure); // not present 4621 movl(linear_result, 1); 4622 jmp(L_check); 4623 4624 bind(L_success); // present 4625 movl(linear_result, 0); 4626 4627 bind(L_check); 4628 cmpl(linear_result, result); 4629 jcc(Assembler::equal, L_done); 4630 4631 { // To avoid calling convention issues, build a record on the stack 4632 // and pass the pointer to that instead. 4633 push(result); 4634 push(linear_result); 4635 push(r_sub_klass); 4636 push(r_super_klass); 4637 movptr(c_rarg1, rsp); 4638 movptr(c_rarg0, (uintptr_t) "mismatch"); 4639 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 4640 should_not_reach_here(); 4641 } 4642 bind(L_done); 4643 4644 BLOCK_COMMENT("} verify_secondary_supers_table"); 4645 } 4646 4647 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 4648 4649 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) { 4650 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 4651 4652 Label L_fallthrough; 4653 if (L_fast_path == nullptr) { 4654 L_fast_path = &L_fallthrough; 4655 } else if (L_slow_path == nullptr) { 4656 L_slow_path = &L_fallthrough; 4657 } 4658 4659 // Fast path check: class is fully initialized. 4660 // init_state needs acquire, but x86 is TSO, and so we are already good. 
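  // (On x86 an ordinary load already has acquire semantics, so no explicit
  //  barrier is needed before the init_state load below.)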
4661 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 4662 jcc(Assembler::equal, *L_fast_path); 4663 4664 // Fast path check: current thread is initializer thread 4665 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset())); 4666 if (L_slow_path == &L_fallthrough) { 4667 jcc(Assembler::equal, *L_fast_path); 4668 bind(*L_slow_path); 4669 } else if (L_fast_path == &L_fallthrough) { 4670 jcc(Assembler::notEqual, *L_slow_path); 4671 bind(*L_fast_path); 4672 } else { 4673 Unimplemented(); 4674 } 4675 } 4676 4677 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 4678 if (VM_Version::supports_cmov()) { 4679 cmovl(cc, dst, src); 4680 } else { 4681 Label L; 4682 jccb(negate_condition(cc), L); 4683 movl(dst, src); 4684 bind(L); 4685 } 4686 } 4687 4688 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 4689 if (VM_Version::supports_cmov()) { 4690 cmovl(cc, dst, src); 4691 } else { 4692 Label L; 4693 jccb(negate_condition(cc), L); 4694 movl(dst, src); 4695 bind(L); 4696 } 4697 } 4698 4699 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 4700 if (!VerifyOops) return; 4701 4702 BLOCK_COMMENT("verify_oop {"); 4703 push(rscratch1); 4704 push(rax); // save rax 4705 push(reg); // pass register argument 4706 4707 // Pass register number to verify_oop_subroutine 4708 const char* b = nullptr; 4709 { 4710 ResourceMark rm; 4711 stringStream ss; 4712 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 4713 b = code_string(ss.as_string()); 4714 } 4715 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4716 pushptr(buffer.addr(), rscratch1); 4717 4718 // call indirectly to solve generation ordering problem 4719 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4720 call(rax); 4721 // Caller pops the arguments (oop, message) and restores rax, r10 4722 BLOCK_COMMENT("} verify_oop"); 4723 } 4724 4725 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 4726 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 4727 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 4728 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 4729 vpternlogd(dst, 0xFF, dst, dst, vector_len); 4730 } else if (VM_Version::supports_avx()) { 4731 vpcmpeqd(dst, dst, dst, vector_len); 4732 } else { 4733 pcmpeqd(dst, dst); 4734 } 4735 } 4736 4737 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 4738 int extra_slot_offset) { 4739 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
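  // For a constant arg_slot the returned address is
  //   rsp + wordSize (skip the return PC)
  //       + expr_offset_in_bytes(extra_slot_offset)
  //       + arg_slot * stackElementSize;
  // a register arg_slot is folded in as a scaled index instead.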
4740 int stackElementSize = Interpreter::stackElementSize; 4741 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 4742 #ifdef ASSERT 4743 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 4744 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 4745 #endif 4746 Register scale_reg = noreg; 4747 Address::ScaleFactor scale_factor = Address::no_scale; 4748 if (arg_slot.is_constant()) { 4749 offset += arg_slot.as_constant() * stackElementSize; 4750 } else { 4751 scale_reg = arg_slot.as_register(); 4752 scale_factor = Address::times(stackElementSize); 4753 } 4754 offset += wordSize; // return PC is on stack 4755 return Address(rsp, scale_reg, scale_factor, offset); 4756 } 4757 4758 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 4759 if (!VerifyOops) return; 4760 4761 push(rscratch1); 4762 push(rax); // save rax, 4763 // addr may contain rsp so we will have to adjust it based on the push 4764 // we just did (and on 64 bit we do two pushes) 4765 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 4766 // stores rax into addr which is backwards of what was intended. 4767 if (addr.uses(rsp)) { 4768 lea(rax, addr); 4769 pushptr(Address(rax, 2 * BytesPerWord)); 4770 } else { 4771 pushptr(addr); 4772 } 4773 4774 // Pass register number to verify_oop_subroutine 4775 const char* b = nullptr; 4776 { 4777 ResourceMark rm; 4778 stringStream ss; 4779 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 4780 b = code_string(ss.as_string()); 4781 } 4782 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4783 pushptr(buffer.addr(), rscratch1); 4784 4785 // call indirectly to solve generation ordering problem 4786 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4787 call(rax); 4788 // Caller pops the arguments (addr, message) and restores rax, r10. 
4789 } 4790 4791 void MacroAssembler::verify_tlab() { 4792 #ifdef ASSERT 4793 if (UseTLAB && VerifyOops) { 4794 Label next, ok; 4795 Register t1 = rsi; 4796 4797 push(t1); 4798 4799 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4800 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset()))); 4801 jcc(Assembler::aboveEqual, next); 4802 STOP("assert(top >= start)"); 4803 should_not_reach_here(); 4804 4805 bind(next); 4806 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); 4807 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4808 jcc(Assembler::aboveEqual, ok); 4809 STOP("assert(top <= end)"); 4810 should_not_reach_here(); 4811 4812 bind(ok); 4813 pop(t1); 4814 } 4815 #endif 4816 } 4817 4818 class ControlWord { 4819 public: 4820 int32_t _value; 4821 4822 int rounding_control() const { return (_value >> 10) & 3 ; } 4823 int precision_control() const { return (_value >> 8) & 3 ; } 4824 bool precision() const { return ((_value >> 5) & 1) != 0; } 4825 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4826 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4827 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4828 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4829 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4830 4831 void print() const { 4832 // rounding control 4833 const char* rc; 4834 switch (rounding_control()) { 4835 case 0: rc = "round near"; break; 4836 case 1: rc = "round down"; break; 4837 case 2: rc = "round up "; break; 4838 case 3: rc = "chop "; break; 4839 default: 4840 rc = nullptr; // silence compiler warnings 4841 fatal("Unknown rounding control: %d", rounding_control()); 4842 }; 4843 // precision control 4844 const char* pc; 4845 switch (precision_control()) { 4846 case 0: pc = "24 bits "; break; 4847 case 1: pc = "reserved"; break; 4848 case 2: pc = "53 bits "; break; 4849 case 3: pc = "64 bits "; break; 4850 default: 4851 pc = nullptr; // silence compiler warnings 4852 fatal("Unknown precision control: %d", precision_control()); 4853 }; 4854 // flags 4855 char f[9]; 4856 f[0] = ' '; 4857 f[1] = ' '; 4858 f[2] = (precision ()) ? 'P' : 'p'; 4859 f[3] = (underflow ()) ? 'U' : 'u'; 4860 f[4] = (overflow ()) ? 'O' : 'o'; 4861 f[5] = (zero_divide ()) ? 'Z' : 'z'; 4862 f[6] = (denormalized()) ? 'D' : 'd'; 4863 f[7] = (invalid ()) ? 
'I' : 'i'; 4864 f[8] = '\x0'; 4865 // output 4866 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 4867 } 4868 4869 }; 4870 4871 class StatusWord { 4872 public: 4873 int32_t _value; 4874 4875 bool busy() const { return ((_value >> 15) & 1) != 0; } 4876 bool C3() const { return ((_value >> 14) & 1) != 0; } 4877 bool C2() const { return ((_value >> 10) & 1) != 0; } 4878 bool C1() const { return ((_value >> 9) & 1) != 0; } 4879 bool C0() const { return ((_value >> 8) & 1) != 0; } 4880 int top() const { return (_value >> 11) & 7 ; } 4881 bool error_status() const { return ((_value >> 7) & 1) != 0; } 4882 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 4883 bool precision() const { return ((_value >> 5) & 1) != 0; } 4884 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4885 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4886 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4887 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4888 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4889 4890 void print() const { 4891 // condition codes 4892 char c[5]; 4893 c[0] = (C3()) ? '3' : '-'; 4894 c[1] = (C2()) ? '2' : '-'; 4895 c[2] = (C1()) ? '1' : '-'; 4896 c[3] = (C0()) ? '0' : '-'; 4897 c[4] = '\x0'; 4898 // flags 4899 char f[9]; 4900 f[0] = (error_status()) ? 'E' : '-'; 4901 f[1] = (stack_fault ()) ? 'S' : '-'; 4902 f[2] = (precision ()) ? 'P' : '-'; 4903 f[3] = (underflow ()) ? 'U' : '-'; 4904 f[4] = (overflow ()) ? 'O' : '-'; 4905 f[5] = (zero_divide ()) ? 'Z' : '-'; 4906 f[6] = (denormalized()) ? 'D' : '-'; 4907 f[7] = (invalid ()) ? 'I' : '-'; 4908 f[8] = '\x0'; 4909 // output 4910 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 4911 } 4912 4913 }; 4914 4915 class TagWord { 4916 public: 4917 int32_t _value; 4918 4919 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 4920 4921 void print() const { 4922 printf("%04x", _value & 0xFFFF); 4923 } 4924 4925 }; 4926 4927 class FPU_Register { 4928 public: 4929 int32_t _m0; 4930 int32_t _m1; 4931 int16_t _ex; 4932 4933 bool is_indefinite() const { 4934 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 4935 } 4936 4937 void print() const { 4938 char sign = (_ex < 0) ? '-' : '+'; 4939 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 4940 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 4941 }; 4942 4943 }; 4944 4945 class FPU_State { 4946 public: 4947 enum { 4948 register_size = 10, 4949 number_of_registers = 8, 4950 register_mask = 7 4951 }; 4952 4953 ControlWord _control_word; 4954 StatusWord _status_word; 4955 TagWord _tag_word; 4956 int32_t _error_offset; 4957 int32_t _error_selector; 4958 int32_t _data_offset; 4959 int32_t _data_selector; 4960 int8_t _register[register_size * number_of_registers]; 4961 4962 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 4963 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 4964 4965 const char* tag_as_string(int tag) const { 4966 switch (tag) { 4967 case 0: return "valid"; 4968 case 1: return "zero"; 4969 case 2: return "special"; 4970 case 3: return "empty"; 4971 } 4972 ShouldNotReachHere(); 4973 return nullptr; 4974 } 4975 4976 void print() const { 4977 // print computation registers 4978 { int t = _status_word.top(); 4979 for (int i = 0; i < number_of_registers; i++) { 4980 int j = (i - t) & register_mask; 4981 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 4982 st(j)->print(); 4983 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 4984 } 4985 } 4986 printf("\n"); 4987 // print control registers 4988 printf("ctrl = "); _control_word.print(); printf("\n"); 4989 printf("stat = "); _status_word .print(); printf("\n"); 4990 printf("tags = "); _tag_word .print(); printf("\n"); 4991 } 4992 4993 }; 4994 4995 class Flag_Register { 4996 public: 4997 int32_t _value; 4998 4999 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5000 bool direction() const { return ((_value >> 10) & 1) != 0; } 5001 bool sign() const { return ((_value >> 7) & 1) != 0; } 5002 bool zero() const { return ((_value >> 6) & 1) != 0; } 5003 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5004 bool parity() const { return ((_value >> 2) & 1) != 0; } 5005 bool carry() const { return ((_value >> 0) & 1) != 0; } 5006 5007 void print() const { 5008 // flags 5009 char f[8]; 5010 f[0] = (overflow ()) ? 'O' : '-'; 5011 f[1] = (direction ()) ? 'D' : '-'; 5012 f[2] = (sign ()) ? 'S' : '-'; 5013 f[3] = (zero ()) ? 'Z' : '-'; 5014 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5015 f[5] = (parity ()) ? 'P' : '-'; 5016 f[6] = (carry ()) ? 'C' : '-'; 5017 f[7] = '\x0'; 5018 // output 5019 printf("%08x flags = %s", _value, f); 5020 } 5021 5022 }; 5023 5024 class IU_Register { 5025 public: 5026 int32_t _value; 5027 5028 void print() const { 5029 printf("%08x %11d", _value, _value); 5030 } 5031 5032 }; 5033 5034 class IU_State { 5035 public: 5036 Flag_Register _eflags; 5037 IU_Register _rdi; 5038 IU_Register _rsi; 5039 IU_Register _rbp; 5040 IU_Register _rsp; 5041 IU_Register _rbx; 5042 IU_Register _rdx; 5043 IU_Register _rcx; 5044 IU_Register _rax; 5045 5046 void print() const { 5047 // computation registers 5048 printf("rax, = "); _rax.print(); printf("\n"); 5049 printf("rbx, = "); _rbx.print(); printf("\n"); 5050 printf("rcx = "); _rcx.print(); printf("\n"); 5051 printf("rdx = "); _rdx.print(); printf("\n"); 5052 printf("rdi = "); _rdi.print(); printf("\n"); 5053 printf("rsi = "); _rsi.print(); printf("\n"); 5054 printf("rbp, = "); _rbp.print(); printf("\n"); 5055 printf("rsp = "); _rsp.print(); printf("\n"); 5056 printf("\n"); 5057 // control registers 5058 printf("flgs = "); _eflags.print(); printf("\n"); 5059 } 5060 }; 5061 5062 5063 class CPU_State { 5064 public: 5065 FPU_State _fpu_state; 5066 IU_State _iu_state; 5067 5068 void print() const { 5069 printf("--------------------------------------------------\n"); 5070 _iu_state .print(); 5071 printf("\n"); 5072 _fpu_state.print(); 5073 printf("--------------------------------------------------\n"); 5074 } 5075 5076 }; 5077 5078 5079 static void _print_CPU_state(CPU_State* state) { 5080 state->print(); 5081 }; 5082 5083 5084 void MacroAssembler::print_CPU_state() { 5085 push_CPU_state(); 5086 push(rsp); // pass CPU state 5087 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5088 addptr(rsp, wordSize); // discard argument 5089 pop_CPU_state(); 5090 } 5091 5092 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5093 // Either restore the MXCSR register after returning from the JNI Call 5094 // or verify that it wasn't changed (with -Xcheck:jni flag). 
5095 if (VM_Version::supports_sse()) { 5096 if (RestoreMXCSROnJNICalls) { 5097 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5098 } else if (CheckJNICalls) { 5099 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5100 } 5101 } 5102 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5103 vzeroupper(); 5104 } 5105 5106 // ((OopHandle)result).resolve(); 5107 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5108 assert_different_registers(result, tmp); 5109 5110 // Only 64 bit platforms support GCs that require a tmp register 5111 // Only IN_HEAP loads require a thread_tmp register 5112 // OopHandle::resolve is an indirection like jobject. 5113 access_load_at(T_OBJECT, IN_NATIVE, 5114 result, Address(result, 0), tmp); 5115 } 5116 5117 // ((WeakHandle)result).resolve(); 5118 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5119 assert_different_registers(rresult, rtmp); 5120 Label resolved; 5121 5122 // A null weak handle resolves to null. 5123 cmpptr(rresult, 0); 5124 jcc(Assembler::equal, resolved); 5125 5126 // Only 64 bit platforms support GCs that require a tmp register 5127 // Only IN_HEAP loads require a thread_tmp register 5128 // WeakHandle::resolve is an indirection like jweak. 5129 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5130 rresult, Address(rresult, 0), rtmp); 5131 bind(resolved); 5132 } 5133 5134 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5135 // get mirror 5136 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5137 load_method_holder(mirror, method); 5138 movptr(mirror, Address(mirror, mirror_offset)); 5139 resolve_oop_handle(mirror, tmp); 5140 } 5141 5142 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5143 load_method_holder(rresult, rmethod); 5144 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5145 } 5146 5147 void MacroAssembler::load_method_holder(Register holder, Register method) { 5148 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5149 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5150 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5151 } 5152 5153 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { 5154 assert(UseCompactObjectHeaders, "expect compact object headers"); 5155 movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5156 shrq(dst, markWord::klass_shift); 5157 } 5158 5159 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5160 assert_different_registers(src, tmp); 5161 assert_different_registers(dst, tmp); 5162 5163 if (UseCompactObjectHeaders) { 5164 load_narrow_klass_compact(dst, src); 5165 decode_klass_not_null(dst, tmp); 5166 } else if (UseCompressedClassPointers) { 5167 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5168 decode_klass_not_null(dst, tmp); 5169 } else { 5170 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5171 } 5172 } 5173 5174 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5175 assert(!UseCompactObjectHeaders, "not with compact headers"); 5176 assert_different_registers(src, tmp); 5177 assert_different_registers(dst, tmp); 5178 if (UseCompressedClassPointers) { 5179 encode_klass_not_null(src, tmp); 5180 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5181 } else { 5182 
movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5183 } 5184 } 5185 5186 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { 5187 if (UseCompactObjectHeaders) { 5188 assert(tmp != noreg, "need tmp"); 5189 assert_different_registers(klass, obj, tmp); 5190 load_narrow_klass_compact(tmp, obj); 5191 cmpl(klass, tmp); 5192 } else if (UseCompressedClassPointers) { 5193 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5194 } else { 5195 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5196 } 5197 } 5198 5199 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { 5200 if (UseCompactObjectHeaders) { 5201 assert(tmp2 != noreg, "need tmp2"); 5202 assert_different_registers(obj1, obj2, tmp1, tmp2); 5203 load_narrow_klass_compact(tmp1, obj1); 5204 load_narrow_klass_compact(tmp2, obj2); 5205 cmpl(tmp1, tmp2); 5206 } else if (UseCompressedClassPointers) { 5207 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5208 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5209 } else { 5210 movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5211 cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5212 } 5213 } 5214 5215 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5216 Register tmp1) { 5217 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5218 decorators = AccessInternal::decorator_fixup(decorators, type); 5219 bool as_raw = (decorators & AS_RAW) != 0; 5220 if (as_raw) { 5221 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1); 5222 } else { 5223 bs->load_at(this, decorators, type, dst, src, tmp1); 5224 } 5225 } 5226 5227 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5228 Register tmp1, Register tmp2, Register tmp3) { 5229 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5230 decorators = AccessInternal::decorator_fixup(decorators, type); 5231 bool as_raw = (decorators & AS_RAW) != 0; 5232 if (as_raw) { 5233 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5234 } else { 5235 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5236 } 5237 } 5238 5239 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5240 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1); 5241 } 5242 5243 // Doesn't do verification, generates fixed size code 5244 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5245 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1); 5246 } 5247 5248 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5249 Register tmp2, Register tmp3, DecoratorSet decorators) { 5250 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5251 } 5252 5253 // Used for storing nulls. 
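// (Assumption, based on the noreg value register passed below: the barrier
//  assembler treats val == noreg as a request to store a null word directly,
//  so no compressed-oop encoding or oop verification is needed on this path.)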
5254 void MacroAssembler::store_heap_oop_null(Address dst) { 5255 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5256 } 5257 5258 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5259 assert(!UseCompactObjectHeaders, "Don't use with compact headers"); 5260 if (UseCompressedClassPointers) { 5261 // Store to klass gap in destination 5262 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5263 } 5264 } 5265 5266 #ifdef ASSERT 5267 void MacroAssembler::verify_heapbase(const char* msg) { 5268 assert (UseCompressedOops, "should be compressed"); 5269 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5270 if (CheckCompressedOops) { 5271 Label ok; 5272 ExternalAddress src2(CompressedOops::base_addr()); 5273 const bool is_src2_reachable = reachable(src2); 5274 if (!is_src2_reachable) { 5275 push(rscratch1); // cmpptr trashes rscratch1 5276 } 5277 cmpptr(r12_heapbase, src2, rscratch1); 5278 jcc(Assembler::equal, ok); 5279 STOP(msg); 5280 bind(ok); 5281 if (!is_src2_reachable) { 5282 pop(rscratch1); 5283 } 5284 } 5285 } 5286 #endif 5287 5288 // Algorithm must match oop.inline.hpp encode_heap_oop. 5289 void MacroAssembler::encode_heap_oop(Register r) { 5290 #ifdef ASSERT 5291 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5292 #endif 5293 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5294 if (CompressedOops::base() == nullptr) { 5295 if (CompressedOops::shift() != 0) { 5296 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5297 shrq(r, LogMinObjAlignmentInBytes); 5298 } 5299 return; 5300 } 5301 testq(r, r); 5302 cmovq(Assembler::equal, r, r12_heapbase); 5303 subq(r, r12_heapbase); 5304 shrq(r, LogMinObjAlignmentInBytes); 5305 } 5306 5307 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5308 #ifdef ASSERT 5309 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5310 if (CheckCompressedOops) { 5311 Label ok; 5312 testq(r, r); 5313 jcc(Assembler::notEqual, ok); 5314 STOP("null oop passed to encode_heap_oop_not_null"); 5315 bind(ok); 5316 } 5317 #endif 5318 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5319 if (CompressedOops::base() != nullptr) { 5320 subq(r, r12_heapbase); 5321 } 5322 if (CompressedOops::shift() != 0) { 5323 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5324 shrq(r, LogMinObjAlignmentInBytes); 5325 } 5326 } 5327 5328 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5329 #ifdef ASSERT 5330 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5331 if (CheckCompressedOops) { 5332 Label ok; 5333 testq(src, src); 5334 jcc(Assembler::notEqual, ok); 5335 STOP("null oop passed to encode_heap_oop_not_null2"); 5336 bind(ok); 5337 } 5338 #endif 5339 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5340 if (dst != src) { 5341 movq(dst, src); 5342 } 5343 if (CompressedOops::base() != nullptr) { 5344 subq(dst, r12_heapbase); 5345 } 5346 if (CompressedOops::shift() != 0) { 5347 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5348 shrq(dst, LogMinObjAlignmentInBytes); 5349 } 5350 } 5351 5352 void MacroAssembler::decode_heap_oop(Register r) { 5353 #ifdef ASSERT 5354 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5355 #endif 5356 if (CompressedOops::base() == nullptr) { 5357 if (CompressedOops::shift() != 0) { 5358 assert 
(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5359 shlq(r, LogMinObjAlignmentInBytes); 5360 } 5361 } else { 5362 Label done; 5363 shlq(r, LogMinObjAlignmentInBytes); 5364 jccb(Assembler::equal, done); 5365 addq(r, r12_heapbase); 5366 bind(done); 5367 } 5368 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5369 } 5370 5371 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5372 // Note: it will change flags 5373 assert (UseCompressedOops, "should only be used for compressed headers"); 5374 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5375 // Cannot assert, unverified entry point counts instructions (see .ad file) 5376 // vtableStubs also counts instructions in pd_code_size_limit. 5377 // Also do not verify_oop as this is called by verify_oop. 5378 if (CompressedOops::shift() != 0) { 5379 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5380 shlq(r, LogMinObjAlignmentInBytes); 5381 if (CompressedOops::base() != nullptr) { 5382 addq(r, r12_heapbase); 5383 } 5384 } else { 5385 assert (CompressedOops::base() == nullptr, "sanity"); 5386 } 5387 } 5388 5389 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5390 // Note: it will change flags 5391 assert (UseCompressedOops, "should only be used for compressed headers"); 5392 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5393 // Cannot assert, unverified entry point counts instructions (see .ad file) 5394 // vtableStubs also counts instructions in pd_code_size_limit. 5395 // Also do not verify_oop as this is called by verify_oop. 5396 if (CompressedOops::shift() != 0) { 5397 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5398 if (LogMinObjAlignmentInBytes == Address::times_8) { 5399 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5400 } else { 5401 if (dst != src) { 5402 movq(dst, src); 5403 } 5404 shlq(dst, LogMinObjAlignmentInBytes); 5405 if (CompressedOops::base() != nullptr) { 5406 addq(dst, r12_heapbase); 5407 } 5408 } 5409 } else { 5410 assert (CompressedOops::base() == nullptr, "sanity"); 5411 if (dst != src) { 5412 movq(dst, src); 5413 } 5414 } 5415 } 5416 5417 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5418 BLOCK_COMMENT("encode_klass_not_null {"); 5419 assert_different_registers(r, tmp); 5420 if (CompressedKlassPointers::base() != nullptr) { 5421 if (AOTCodeCache::is_on_for_dump()) { 5422 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5423 } else { 5424 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5425 } 5426 subq(r, tmp); 5427 } 5428 if (CompressedKlassPointers::shift() != 0) { 5429 shrq(r, CompressedKlassPointers::shift()); 5430 } 5431 BLOCK_COMMENT("} encode_klass_not_null"); 5432 } 5433 5434 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5435 BLOCK_COMMENT("encode_and_move_klass_not_null {"); 5436 assert_different_registers(src, dst); 5437 if (CompressedKlassPointers::base() != nullptr) { 5438 if (AOTCodeCache::is_on_for_dump()) { 5439 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5440 negq(dst); 5441 } else { 5442 movptr(dst, -(intptr_t)CompressedKlassPointers::base()); 5443 } 5444 addq(dst, src); 5445 } else { 5446 movptr(dst, src); 5447 } 5448 if (CompressedKlassPointers::shift() != 0) { 5449 shrq(dst, CompressedKlassPointers::shift()); 5450 } 5451 BLOCK_COMMENT("} encode_and_move_klass_not_null"); 5452 } 5453 5454 void 
MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5455 BLOCK_COMMENT("decode_klass_not_null {"); 5456 assert_different_registers(r, tmp); 5457 // Note: it will change flags 5458 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5459 // Cannot assert, unverified entry point counts instructions (see .ad file) 5460 // vtableStubs also counts instructions in pd_code_size_limit. 5461 // Also do not verify_oop as this is called by verify_oop. 5462 if (CompressedKlassPointers::shift() != 0) { 5463 shlq(r, CompressedKlassPointers::shift()); 5464 } 5465 if (CompressedKlassPointers::base() != nullptr) { 5466 if (AOTCodeCache::is_on_for_dump()) { 5467 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5468 } else { 5469 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5470 } 5471 addq(r, tmp); 5472 } 5473 BLOCK_COMMENT("} decode_klass_not_null"); 5474 } 5475 5476 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5477 BLOCK_COMMENT("decode_and_move_klass_not_null {"); 5478 assert_different_registers(src, dst); 5479 // Note: it will change flags 5480 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5481 // Cannot assert, unverified entry point counts instructions (see .ad file) 5482 // vtableStubs also counts instructions in pd_code_size_limit. 5483 // Also do not verify_oop as this is called by verify_oop. 5484 5485 if (CompressedKlassPointers::base() == nullptr && 5486 CompressedKlassPointers::shift() == 0) { 5487 // The best case scenario is that there is no base or shift. Then it is already 5488 // a pointer that needs nothing but a register rename. 5489 movptr(dst, src); 5490 } else { 5491 if (CompressedKlassPointers::shift() <= Address::times_8) { 5492 if (CompressedKlassPointers::base() != nullptr) { 5493 if (AOTCodeCache::is_on_for_dump()) { 5494 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5495 } else { 5496 movptr(dst, (intptr_t)CompressedKlassPointers::base()); 5497 } 5498 } else { 5499 xorq(dst, dst); 5500 } 5501 if (CompressedKlassPointers::shift() != 0) { 5502 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?"); 5503 leaq(dst, Address(dst, src, Address::times_8, 0)); 5504 } else { 5505 addq(dst, src); 5506 } 5507 } else { 5508 if (CompressedKlassPointers::base() != nullptr) { 5509 if (AOTCodeCache::is_on_for_dump()) { 5510 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5511 shrq(dst, CompressedKlassPointers::shift()); 5512 } else { 5513 const intptr_t base_right_shifted = 5514 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5515 movptr(dst, base_right_shifted); 5516 } 5517 } else { 5518 xorq(dst, dst); 5519 } 5520 addq(dst, src); 5521 shlq(dst, CompressedKlassPointers::shift()); 5522 } 5523 } 5524 BLOCK_COMMENT("} decode_and_move_klass_not_null"); 5525 } 5526 5527 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5528 assert (UseCompressedOops, "should only be used for compressed headers"); 5529 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5530 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5531 int oop_index = oop_recorder()->find_index(obj); 5532 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5533 mov_narrow_oop(dst, oop_index, rspec); 5534 } 5535 5536 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5537 assert (UseCompressedOops, "should 
only be used for compressed headers"); 5538 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5539 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5540 int oop_index = oop_recorder()->find_index(obj); 5541 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5542 mov_narrow_oop(dst, oop_index, rspec); 5543 } 5544 5545 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5546 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5547 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5548 int klass_index = oop_recorder()->find_index(k); 5549 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5550 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5551 } 5552 5553 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 5554 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5555 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5556 int klass_index = oop_recorder()->find_index(k); 5557 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5558 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5559 } 5560 5561 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 5562 assert (UseCompressedOops, "should only be used for compressed headers"); 5563 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5564 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5565 int oop_index = oop_recorder()->find_index(obj); 5566 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5567 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5568 } 5569 5570 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 5571 assert (UseCompressedOops, "should only be used for compressed headers"); 5572 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5573 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5574 int oop_index = oop_recorder()->find_index(obj); 5575 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5576 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5577 } 5578 5579 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 5580 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5581 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5582 int klass_index = oop_recorder()->find_index(k); 5583 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5584 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5585 } 5586 5587 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 5588 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5589 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5590 int klass_index = oop_recorder()->find_index(k); 5591 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5592 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5593 } 5594 5595 void MacroAssembler::reinit_heapbase() { 5596 if (UseCompressedOops) { 5597 if (Universe::heap() != nullptr) { // GC was initialized 5598 if (CompressedOops::base() == nullptr) { 5599 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 5600 } else if (AOTCodeCache::is_on_for_dump()) { 5601 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 5602 } else { 5603 mov64(r12_heapbase, 
(int64_t)CompressedOops::base()); 5604 } 5605 } else { 5606 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 5607 } 5608 } 5609 } 5610 5611 #if COMPILER2_OR_JVMCI 5612 5613 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 5614 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5615 // cnt - number of qwords (8-byte words). 5616 // base - start address, qword aligned. 5617 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 5618 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 5619 if (use64byteVector) { 5620 vpxor(xtmp, xtmp, xtmp, AVX_512bit); 5621 } else if (MaxVectorSize >= 32) { 5622 vpxor(xtmp, xtmp, xtmp, AVX_256bit); 5623 } else { 5624 pxor(xtmp, xtmp); 5625 } 5626 jmp(L_zero_64_bytes); 5627 5628 BIND(L_loop); 5629 if (MaxVectorSize >= 32) { 5630 fill64(base, 0, xtmp, use64byteVector); 5631 } else { 5632 movdqu(Address(base, 0), xtmp); 5633 movdqu(Address(base, 16), xtmp); 5634 movdqu(Address(base, 32), xtmp); 5635 movdqu(Address(base, 48), xtmp); 5636 } 5637 addptr(base, 64); 5638 5639 BIND(L_zero_64_bytes); 5640 subptr(cnt, 8); 5641 jccb(Assembler::greaterEqual, L_loop); 5642 5643 // Copy trailing 64 bytes 5644 if (use64byteVector) { 5645 addptr(cnt, 8); 5646 jccb(Assembler::equal, L_end); 5647 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true); 5648 jmp(L_end); 5649 } else { 5650 addptr(cnt, 4); 5651 jccb(Assembler::less, L_tail); 5652 if (MaxVectorSize >= 32) { 5653 vmovdqu(Address(base, 0), xtmp); 5654 } else { 5655 movdqu(Address(base, 0), xtmp); 5656 movdqu(Address(base, 16), xtmp); 5657 } 5658 } 5659 addptr(base, 32); 5660 subptr(cnt, 4); 5661 5662 BIND(L_tail); 5663 addptr(cnt, 4); 5664 jccb(Assembler::lessEqual, L_end); 5665 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 5666 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp); 5667 } else { 5668 decrement(cnt); 5669 5670 BIND(L_sloop); 5671 movq(Address(base, 0), xtmp); 5672 addptr(base, 8); 5673 decrement(cnt); 5674 jccb(Assembler::greaterEqual, L_sloop); 5675 } 5676 BIND(L_end); 5677 } 5678 5679 // Clearing constant sized memory using YMM/ZMM registers. 5680 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5681 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); 5682 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 5683 5684 int vector64_count = (cnt & (~0x7)) >> 3; 5685 cnt = cnt & 0x7; 5686 const int fill64_per_loop = 4; 5687 const int max_unrolled_fill64 = 8; 5688 5689 // 64 byte initialization loop. 5690 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 5691 int start64 = 0; 5692 if (vector64_count > max_unrolled_fill64) { 5693 Label LOOP; 5694 Register index = rtmp; 5695 5696 start64 = vector64_count - (vector64_count % fill64_per_loop); 5697 5698 movl(index, 0); 5699 BIND(LOOP); 5700 for (int i = 0; i < fill64_per_loop; i++) { 5701 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 5702 } 5703 addl(index, fill64_per_loop * 64); 5704 cmpl(index, start64 * 64); 5705 jccb(Assembler::less, LOOP); 5706 } 5707 for (int i = start64; i < vector64_count; i++) { 5708 fill64(base, i * 64, xtmp, use64byteVector); 5709 } 5710 5711 // Clear remaining 64 byte tail. 
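  // The tail below is at most 7 qwords. A sketch of the masked-store idea used
  // by the odd cases (assumption, mirroring the switch that follows):
  //   mask = (1 << cnt) - 1;                   // one k-mask bit per 8-byte lane
  //   kmovwl(kmask, mask);
  //   evmovdqu(T_LONG, kmask, dst, xtmp, ...)  // writes only the selected lanes
  // Even counts that fit a full 128/256-bit store skip the mask entirely.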
5712 int disp = vector64_count * 64; 5713 if (cnt) { 5714 switch (cnt) { 5715 case 1: 5716 movq(Address(base, disp), xtmp); 5717 break; 5718 case 2: 5719 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 5720 break; 5721 case 3: 5722 movl(rtmp, 0x7); 5723 kmovwl(mask, rtmp); 5724 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 5725 break; 5726 case 4: 5727 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5728 break; 5729 case 5: 5730 if (use64byteVector) { 5731 movl(rtmp, 0x1F); 5732 kmovwl(mask, rtmp); 5733 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5734 } else { 5735 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5736 movq(Address(base, disp + 32), xtmp); 5737 } 5738 break; 5739 case 6: 5740 if (use64byteVector) { 5741 movl(rtmp, 0x3F); 5742 kmovwl(mask, rtmp); 5743 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5744 } else { 5745 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5746 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 5747 } 5748 break; 5749 case 7: 5750 if (use64byteVector) { 5751 movl(rtmp, 0x7F); 5752 kmovwl(mask, rtmp); 5753 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5754 } else { 5755 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5756 movl(rtmp, 0x7); 5757 kmovwl(mask, rtmp); 5758 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 5759 } 5760 break; 5761 default: 5762 fatal("Unexpected length : %d\n",cnt); 5763 break; 5764 } 5765 } 5766 } 5767 5768 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, 5769 bool is_large, KRegister mask) { 5770 // cnt - number of qwords (8-byte words). 5771 // base - start address, qword aligned. 
5772 // is_large - if optimizers know cnt is larger than InitArrayShortSize 5773 assert(base==rdi, "base register must be edi for rep stos"); 5774 assert(tmp==rax, "tmp register must be eax for rep stos"); 5775 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 5776 assert(InitArrayShortSize % BytesPerLong == 0, 5777 "InitArrayShortSize should be the multiple of BytesPerLong"); 5778 5779 Label DONE; 5780 if (!is_large || !UseXMMForObjInit) { 5781 xorptr(tmp, tmp); 5782 } 5783 5784 if (!is_large) { 5785 Label LOOP, LONG; 5786 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 5787 jccb(Assembler::greater, LONG); 5788 5789 decrement(cnt); 5790 jccb(Assembler::negative, DONE); // Zero length 5791 5792 // Use individual pointer-sized stores for small counts: 5793 BIND(LOOP); 5794 movptr(Address(base, cnt, Address::times_ptr), tmp); 5795 decrement(cnt); 5796 jccb(Assembler::greaterEqual, LOOP); 5797 jmpb(DONE); 5798 5799 BIND(LONG); 5800 } 5801 5802 // Use longer rep-prefixed ops for non-small counts: 5803 if (UseFastStosb) { 5804 shlptr(cnt, 3); // convert to number of bytes 5805 rep_stosb(); 5806 } else if (UseXMMForObjInit) { 5807 xmm_clear_mem(base, cnt, tmp, xtmp, mask); 5808 } else { 5809 rep_stos(); 5810 } 5811 5812 BIND(DONE); 5813 } 5814 5815 #endif //COMPILER2_OR_JVMCI 5816 5817 5818 void MacroAssembler::generate_fill(BasicType t, bool aligned, 5819 Register to, Register value, Register count, 5820 Register rtmp, XMMRegister xtmp) { 5821 ShortBranchVerifier sbv(this); 5822 assert_different_registers(to, value, count, rtmp); 5823 Label L_exit; 5824 Label L_fill_2_bytes, L_fill_4_bytes; 5825 5826 #if defined(COMPILER2) 5827 if(MaxVectorSize >=32 && 5828 VM_Version::supports_avx512vlbw() && 5829 VM_Version::supports_bmi2()) { 5830 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 5831 return; 5832 } 5833 #endif 5834 5835 int shift = -1; 5836 switch (t) { 5837 case T_BYTE: 5838 shift = 2; 5839 break; 5840 case T_SHORT: 5841 shift = 1; 5842 break; 5843 case T_INT: 5844 shift = 0; 5845 break; 5846 default: ShouldNotReachHere(); 5847 } 5848 5849 if (t == T_BYTE) { 5850 andl(value, 0xff); 5851 movl(rtmp, value); 5852 shll(rtmp, 8); 5853 orl(value, rtmp); 5854 } 5855 if (t == T_SHORT) { 5856 andl(value, 0xffff); 5857 } 5858 if (t == T_BYTE || t == T_SHORT) { 5859 movl(rtmp, value); 5860 shll(rtmp, 16); 5861 orl(value, rtmp); 5862 } 5863 5864 cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 5865 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 5866 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 5867 Label L_skip_align2; 5868 // align source address at 4 bytes address boundary 5869 if (t == T_BYTE) { 5870 Label L_skip_align1; 5871 // One byte misalignment happens only for byte arrays 5872 testptr(to, 1); 5873 jccb(Assembler::zero, L_skip_align1); 5874 movb(Address(to, 0), value); 5875 increment(to); 5876 decrement(count); 5877 BIND(L_skip_align1); 5878 } 5879 // Two bytes misalignment happens only for byte and short (char) arrays 5880 testptr(to, 2); 5881 jccb(Assembler::zero, L_skip_align2); 5882 movw(Address(to, 0), value); 5883 addptr(to, 2); 5884 subptr(count, 1<<(shift-1)); 5885 BIND(L_skip_align2); 5886 } 5887 { 5888 Label L_fill_32_bytes; 5889 if (!UseUnalignedLoadStores) { 5890 // align to 8 bytes, we know we are 4 byte aligned to start 5891 testptr(to, 4); 5892 jccb(Assembler::zero, L_fill_32_bytes); 5893 movl(Address(to, 0), value); 5894 addptr(to, 4); 5895 subptr(count, 1<<shift); 5896 } 5897 BIND(L_fill_32_bytes); 5898 
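  // At this point `value` already holds the fill pattern replicated across a
  // 32-bit lane (a byte was widened to 16 and then to 32 bits, a short to 32
  // bits), so the vector paths below only need to broadcast dwords.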
{ 5899 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 5900 movdl(xtmp, value); 5901 if (UseAVX >= 2 && UseUnalignedLoadStores) { 5902 Label L_check_fill_32_bytes; 5903 if (UseAVX > 2) { 5904 // Fill 64-byte chunks 5905 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 5906 5907 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 5908 cmpptr(count, VM_Version::avx3_threshold()); 5909 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 5910 5911 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 5912 5913 subptr(count, 16 << shift); 5914 jccb(Assembler::less, L_check_fill_32_bytes); 5915 align(16); 5916 5917 BIND(L_fill_64_bytes_loop_avx3); 5918 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 5919 addptr(to, 64); 5920 subptr(count, 16 << shift); 5921 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 5922 jmpb(L_check_fill_32_bytes); 5923 5924 BIND(L_check_fill_64_bytes_avx2); 5925 } 5926 // Fill 64-byte chunks 5927 Label L_fill_64_bytes_loop; 5928 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 5929 5930 subptr(count, 16 << shift); 5931 jcc(Assembler::less, L_check_fill_32_bytes); 5932 align(16); 5933 5934 BIND(L_fill_64_bytes_loop); 5935 vmovdqu(Address(to, 0), xtmp); 5936 vmovdqu(Address(to, 32), xtmp); 5937 addptr(to, 64); 5938 subptr(count, 16 << shift); 5939 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 5940 5941 BIND(L_check_fill_32_bytes); 5942 addptr(count, 8 << shift); 5943 jccb(Assembler::less, L_check_fill_8_bytes); 5944 vmovdqu(Address(to, 0), xtmp); 5945 addptr(to, 32); 5946 subptr(count, 8 << shift); 5947 5948 BIND(L_check_fill_8_bytes); 5949 // clean upper bits of YMM registers 5950 movdl(xtmp, value); 5951 pshufd(xtmp, xtmp, 0); 5952 } else { 5953 // Fill 32-byte chunks 5954 pshufd(xtmp, xtmp, 0); 5955 5956 subptr(count, 8 << shift); 5957 jcc(Assembler::less, L_check_fill_8_bytes); 5958 align(16); 5959 5960 BIND(L_fill_32_bytes_loop); 5961 5962 if (UseUnalignedLoadStores) { 5963 movdqu(Address(to, 0), xtmp); 5964 movdqu(Address(to, 16), xtmp); 5965 } else { 5966 movq(Address(to, 0), xtmp); 5967 movq(Address(to, 8), xtmp); 5968 movq(Address(to, 16), xtmp); 5969 movq(Address(to, 24), xtmp); 5970 } 5971 5972 addptr(to, 32); 5973 subptr(count, 8 << shift); 5974 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 5975 5976 BIND(L_check_fill_8_bytes); 5977 } 5978 addptr(count, 8 << shift); 5979 jccb(Assembler::zero, L_exit); 5980 jmpb(L_fill_8_bytes); 5981 5982 // 5983 // length is too short, just fill qwords 5984 // 5985 BIND(L_fill_8_bytes_loop); 5986 movq(Address(to, 0), xtmp); 5987 addptr(to, 8); 5988 BIND(L_fill_8_bytes); 5989 subptr(count, 1 << (shift + 1)); 5990 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 5991 } 5992 } 5993 // fill trailing 4 bytes 5994 BIND(L_fill_4_bytes); 5995 testl(count, 1<<shift); 5996 jccb(Assembler::zero, L_fill_2_bytes); 5997 movl(Address(to, 0), value); 5998 if (t == T_BYTE || t == T_SHORT) { 5999 Label L_fill_byte; 6000 addptr(to, 4); 6001 BIND(L_fill_2_bytes); 6002 // fill trailing 2 bytes 6003 testl(count, 1<<(shift-1)); 6004 jccb(Assembler::zero, L_fill_byte); 6005 movw(Address(to, 0), value); 6006 if (t == T_BYTE) { 6007 addptr(to, 2); 6008 BIND(L_fill_byte); 6009 // fill trailing byte 6010 testl(count, 1); 6011 jccb(Assembler::zero, L_exit); 6012 movb(Address(to, 0), value); 6013 } else { 6014 BIND(L_fill_byte); 6015 } 6016 } else { 6017 BIND(L_fill_2_bytes); 6018 } 6019 BIND(L_exit); 6020 } 6021 6022 void 
MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 6023 switch(type) { 6024 case T_BYTE: 6025 case T_BOOLEAN: 6026 evpbroadcastb(dst, src, vector_len); 6027 break; 6028 case T_SHORT: 6029 case T_CHAR: 6030 evpbroadcastw(dst, src, vector_len); 6031 break; 6032 case T_INT: 6033 case T_FLOAT: 6034 evpbroadcastd(dst, src, vector_len); 6035 break; 6036 case T_LONG: 6037 case T_DOUBLE: 6038 evpbroadcastq(dst, src, vector_len); 6039 break; 6040 default: 6041 fatal("Unhandled type : %s", type2name(type)); 6042 break; 6043 } 6044 } 6045 6046 // Encode given char[]/byte[] to byte[] in ISO_8859_1 or ASCII 6047 // 6048 // @IntrinsicCandidate 6049 // int sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0( 6050 // char[] sa, int sp, byte[] da, int dp, int len) { 6051 // int i = 0; 6052 // for (; i < len; i++) { 6053 // char c = sa[sp++]; 6054 // if (c > '\u00FF') 6055 // break; 6056 // da[dp++] = (byte) c; 6057 // } 6058 // return i; 6059 // } 6060 // 6061 // @IntrinsicCandidate 6062 // int java.lang.StringCoding.encodeISOArray0( 6063 // byte[] sa, int sp, byte[] da, int dp, int len) { 6064 // int i = 0; 6065 // for (; i < len; i++) { 6066 // char c = StringUTF16.getChar(sa, sp++); 6067 // if (c > '\u00FF') 6068 // break; 6069 // da[dp++] = (byte) c; 6070 // } 6071 // return i; 6072 // } 6073 // 6074 // @IntrinsicCandidate 6075 // int java.lang.StringCoding.encodeAsciiArray0( 6076 // char[] sa, int sp, byte[] da, int dp, int len) { 6077 // int i = 0; 6078 // for (; i < len; i++) { 6079 // char c = sa[sp++]; 6080 // if (c >= '\u0080') 6081 // break; 6082 // da[dp++] = (byte) c; 6083 // } 6084 // return i; 6085 // } 6086 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6087 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6088 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6089 Register tmp5, Register result, bool ascii) { 6090 6091 // rsi: src 6092 // rdi: dst 6093 // rdx: len 6094 // rcx: tmp5 6095 // rax: result 6096 ShortBranchVerifier sbv(this); 6097 assert_different_registers(src, dst, len, tmp5, result); 6098 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6099 6100 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6101 int short_mask = ascii ? 
0xff80 : 0xff00; 6102 6103 // set result 6104 xorl(result, result); 6105 // check for zero length 6106 testl(len, len); 6107 jcc(Assembler::zero, L_done); 6108 6109 movl(result, len); 6110 6111 // Setup pointers 6112 lea(src, Address(src, len, Address::times_2)); // char[] 6113 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6114 negptr(len); 6115 6116 if (UseSSE42Intrinsics || UseAVX >= 2) { 6117 Label L_copy_8_chars, L_copy_8_chars_exit; 6118 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 6119 6120 if (UseAVX >= 2) { 6121 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 6122 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6123 movdl(tmp1Reg, tmp5); 6124 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 6125 jmp(L_chars_32_check); 6126 6127 bind(L_copy_32_chars); 6128 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 6129 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 6130 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6131 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6132 jccb(Assembler::notZero, L_copy_32_chars_exit); 6133 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6134 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 6135 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 6136 6137 bind(L_chars_32_check); 6138 addptr(len, 32); 6139 jcc(Assembler::lessEqual, L_copy_32_chars); 6140 6141 bind(L_copy_32_chars_exit); 6142 subptr(len, 16); 6143 jccb(Assembler::greater, L_copy_16_chars_exit); 6144 6145 } else if (UseSSE42Intrinsics) { 6146 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6147 movdl(tmp1Reg, tmp5); 6148 pshufd(tmp1Reg, tmp1Reg, 0); 6149 jmpb(L_chars_16_check); 6150 } 6151 6152 bind(L_copy_16_chars); 6153 if (UseAVX >= 2) { 6154 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 6155 vptest(tmp2Reg, tmp1Reg); 6156 jcc(Assembler::notZero, L_copy_16_chars_exit); 6157 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 6158 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 6159 } else { 6160 if (UseAVX > 0) { 6161 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6162 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6163 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 6164 } else { 6165 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6166 por(tmp2Reg, tmp3Reg); 6167 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6168 por(tmp2Reg, tmp4Reg); 6169 } 6170 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6171 jccb(Assembler::notZero, L_copy_16_chars_exit); 6172 packuswb(tmp3Reg, tmp4Reg); 6173 } 6174 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 6175 6176 bind(L_chars_16_check); 6177 addptr(len, 16); 6178 jcc(Assembler::lessEqual, L_copy_16_chars); 6179 6180 bind(L_copy_16_chars_exit); 6181 if (UseAVX >= 2) { 6182 // clean upper bits of YMM registers 6183 vpxor(tmp2Reg, tmp2Reg); 6184 vpxor(tmp3Reg, tmp3Reg); 6185 vpxor(tmp4Reg, tmp4Reg); 6186 movdl(tmp1Reg, tmp5); 6187 pshufd(tmp1Reg, tmp1Reg, 0); 6188 } 6189 subptr(len, 8); 6190 jccb(Assembler::greater, L_copy_8_chars_exit); 6191 6192 bind(L_copy_8_chars); 6193 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 6194 ptest(tmp3Reg, tmp1Reg); 6195 jccb(Assembler::notZero, L_copy_8_chars_exit); 6196 packuswb(tmp3Reg, tmp1Reg); 6197 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 6198 addptr(len, 8); 6199 
jccb(Assembler::lessEqual, L_copy_8_chars); 6200 6201 bind(L_copy_8_chars_exit); 6202 subptr(len, 8); 6203 jccb(Assembler::zero, L_done); 6204 } 6205 6206 bind(L_copy_1_char); 6207 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 6208 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 6209 jccb(Assembler::notZero, L_copy_1_char_exit); 6210 movb(Address(dst, len, Address::times_1, 0), tmp5); 6211 addptr(len, 1); 6212 jccb(Assembler::less, L_copy_1_char); 6213 6214 bind(L_copy_1_char_exit); 6215 addptr(result, len); // len is negative count of not processed elements 6216 6217 bind(L_done); 6218 } 6219 6220 /** 6221 * Helper for multiply_to_len(). 6222 */ 6223 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 6224 addq(dest_lo, src1); 6225 adcq(dest_hi, 0); 6226 addq(dest_lo, src2); 6227 adcq(dest_hi, 0); 6228 } 6229 6230 /** 6231 * Multiply 64 bit by 64 bit first loop. 6232 */ 6233 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 6234 Register y, Register y_idx, Register z, 6235 Register carry, Register product, 6236 Register idx, Register kdx) { 6237 // 6238 // jlong carry, x[], y[], z[]; 6239 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6240 // huge_128 product = y[idx] * x[xstart] + carry; 6241 // z[kdx] = (jlong)product; 6242 // carry = (jlong)(product >>> 64); 6243 // } 6244 // z[xstart] = carry; 6245 // 6246 6247 Label L_first_loop, L_first_loop_exit; 6248 Label L_one_x, L_one_y, L_multiply; 6249 6250 decrementl(xstart); 6251 jcc(Assembler::negative, L_one_x); 6252 6253 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6254 rorq(x_xstart, 32); // convert big-endian to little-endian 6255 6256 bind(L_first_loop); 6257 decrementl(idx); 6258 jcc(Assembler::negative, L_first_loop_exit); 6259 decrementl(idx); 6260 jcc(Assembler::negative, L_one_y); 6261 movq(y_idx, Address(y, idx, Address::times_4, 0)); 6262 rorq(y_idx, 32); // convert big-endian to little-endian 6263 bind(L_multiply); 6264 movq(product, x_xstart); 6265 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 6266 addq(product, carry); 6267 adcq(rdx, 0); 6268 subl(kdx, 2); 6269 movl(Address(z, kdx, Address::times_4, 4), product); 6270 shrq(product, 32); 6271 movl(Address(z, kdx, Address::times_4, 0), product); 6272 movq(carry, rdx); 6273 jmp(L_first_loop); 6274 6275 bind(L_one_y); 6276 movl(y_idx, Address(y, 0)); 6277 jmp(L_multiply); 6278 6279 bind(L_one_x); 6280 movl(x_xstart, Address(x, 0)); 6281 jmp(L_first_loop); 6282 6283 bind(L_first_loop_exit); 6284 } 6285 6286 /** 6287 * Multiply 64 bit by 64 bit and add 128 bit. 
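 *
 * The 64-bit chunks of y and z are stored as pairs of big-endian 32-bit ints,
 * so each quadword is rotated by 32 after loading, and the result is written
 * back as two 32-bit halves (low word at offset+4, high word at offset).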
6288 */ 6289 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6290 Register yz_idx, Register idx, 6291 Register carry, Register product, int offset) { 6292 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6293 // z[kdx] = (jlong)product; 6294 6295 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6296 rorq(yz_idx, 32); // convert big-endian to little-endian 6297 movq(product, x_xstart); 6298 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6299 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6300 rorq(yz_idx, 32); // convert big-endian to little-endian 6301 6302 add2_with_carry(rdx, product, carry, yz_idx); 6303 6304 movl(Address(z, idx, Address::times_4, offset+4), product); 6305 shrq(product, 32); 6306 movl(Address(z, idx, Address::times_4, offset), product); 6307 6308 } 6309 6310 /** 6311 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6312 */ 6313 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6314 Register yz_idx, Register idx, Register jdx, 6315 Register carry, Register product, 6316 Register carry2) { 6317 // jlong carry, x[], y[], z[]; 6318 // int kdx = ystart+1; 6319 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6320 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6321 // z[kdx+idx+1] = (jlong)product; 6322 // jlong carry2 = (jlong)(product >>> 64); 6323 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6324 // z[kdx+idx] = (jlong)product; 6325 // carry = (jlong)(product >>> 64); 6326 // } 6327 // idx += 2; 6328 // if (idx > 0) { 6329 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6330 // z[kdx+idx] = (jlong)product; 6331 // carry = (jlong)(product >>> 64); 6332 // } 6333 // 6334 6335 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6336 6337 movl(jdx, idx); 6338 andl(jdx, 0xFFFFFFFC); 6339 shrl(jdx, 2); 6340 6341 bind(L_third_loop); 6342 subl(jdx, 1); 6343 jcc(Assembler::negative, L_third_loop_exit); 6344 subl(idx, 4); 6345 6346 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6347 movq(carry2, rdx); 6348 6349 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6350 movq(carry, rdx); 6351 jmp(L_third_loop); 6352 6353 bind (L_third_loop_exit); 6354 6355 andl (idx, 0x3); 6356 jcc(Assembler::zero, L_post_third_loop_done); 6357 6358 Label L_check_1; 6359 subl(idx, 2); 6360 jcc(Assembler::negative, L_check_1); 6361 6362 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6363 movq(carry, rdx); 6364 6365 bind (L_check_1); 6366 addl (idx, 0x2); 6367 andl (idx, 0x1); 6368 subl(idx, 1); 6369 jcc(Assembler::negative, L_post_third_loop_done); 6370 6371 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6372 movq(product, x_xstart); 6373 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6374 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6375 6376 add2_with_carry(rdx, product, yz_idx, carry); 6377 6378 movl(Address(z, idx, Address::times_4, 0), product); 6379 shrq(product, 32); 6380 6381 shlq(rdx, 32); 6382 orq(product, rdx); 6383 movq(carry, product); 6384 6385 bind(L_post_third_loop_done); 6386 } 6387 6388 /** 6389 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
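 * The multiplicand x[xstart] is expected in rdx, since mulx takes it as an
 * implicit operand. When ADX is available, the two carry chains are kept in
 * CF and OF via adcx/adox; otherwise add2_with_carry() is used as a fallback.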
6390 * 6391 */ 6392 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6393 Register carry, Register carry2, 6394 Register idx, Register jdx, 6395 Register yz_idx1, Register yz_idx2, 6396 Register tmp, Register tmp3, Register tmp4) { 6397 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6398 6399 // jlong carry, x[], y[], z[]; 6400 // int kdx = ystart+1; 6401 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6402 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6403 // jlong carry2 = (jlong)(tmp3 >>> 64); 6404 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6405 // carry = (jlong)(tmp4 >>> 64); 6406 // z[kdx+idx+1] = (jlong)tmp3; 6407 // z[kdx+idx] = (jlong)tmp4; 6408 // } 6409 // idx += 2; 6410 // if (idx > 0) { 6411 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6412 // z[kdx+idx] = (jlong)yz_idx1; 6413 // carry = (jlong)(yz_idx1 >>> 64); 6414 // } 6415 // 6416 6417 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6418 6419 movl(jdx, idx); 6420 andl(jdx, 0xFFFFFFFC); 6421 shrl(jdx, 2); 6422 6423 bind(L_third_loop); 6424 subl(jdx, 1); 6425 jcc(Assembler::negative, L_third_loop_exit); 6426 subl(idx, 4); 6427 6428 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6429 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6430 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6431 rorxq(yz_idx2, yz_idx2, 32); 6432 6433 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6434 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6435 6436 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6437 rorxq(yz_idx1, yz_idx1, 32); 6438 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6439 rorxq(yz_idx2, yz_idx2, 32); 6440 6441 if (VM_Version::supports_adx()) { 6442 adcxq(tmp3, carry); 6443 adoxq(tmp3, yz_idx1); 6444 6445 adcxq(tmp4, tmp); 6446 adoxq(tmp4, yz_idx2); 6447 6448 movl(carry, 0); // does not affect flags 6449 adcxq(carry2, carry); 6450 adoxq(carry2, carry); 6451 } else { 6452 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6453 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6454 } 6455 movq(carry, carry2); 6456 6457 movl(Address(z, idx, Address::times_4, 12), tmp3); 6458 shrq(tmp3, 32); 6459 movl(Address(z, idx, Address::times_4, 8), tmp3); 6460 6461 movl(Address(z, idx, Address::times_4, 4), tmp4); 6462 shrq(tmp4, 32); 6463 movl(Address(z, idx, Address::times_4, 0), tmp4); 6464 6465 jmp(L_third_loop); 6466 6467 bind (L_third_loop_exit); 6468 6469 andl (idx, 0x3); 6470 jcc(Assembler::zero, L_post_third_loop_done); 6471 6472 Label L_check_1; 6473 subl(idx, 2); 6474 jcc(Assembler::negative, L_check_1); 6475 6476 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6477 rorxq(yz_idx1, yz_idx1, 32); 6478 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6479 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6480 rorxq(yz_idx2, yz_idx2, 32); 6481 6482 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6483 6484 movl(Address(z, idx, Address::times_4, 4), tmp3); 6485 shrq(tmp3, 32); 6486 movl(Address(z, idx, Address::times_4, 0), tmp3); 6487 movq(carry, tmp4); 6488 6489 bind (L_check_1); 6490 addl (idx, 0x2); 6491 andl (idx, 0x1); 6492 subl(idx, 1); 6493 jcc(Assembler::negative, L_post_third_loop_done); 6494 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6495 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6496 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6497 6498 add2_with_carry(carry2, tmp3, tmp4, carry); 6499 6500 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 6501 shrq(tmp3, 32); 6502 6503 shlq(carry2, 32); 6504 orq(tmp3, carry2); 6505 movq(carry, tmp3); 6506 6507 bind(L_post_third_loop_done); 6508 } 6509 6510 /** 6511 * Code for BigInteger::multiplyToLen() intrinsic. 6512 * 6513 * rdi: x 6514 * rax: xlen 6515 * rsi: y 6516 * rcx: ylen 6517 * r8: z 6518 * r11: tmp0 6519 * r12: tmp1 6520 * r13: tmp2 6521 * r14: tmp3 6522 * r15: tmp4 6523 * rbx: tmp5 6524 * 6525 */ 6526 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 6527 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 6528 ShortBranchVerifier sbv(this); 6529 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 6530 6531 push(tmp0); 6532 push(tmp1); 6533 push(tmp2); 6534 push(tmp3); 6535 push(tmp4); 6536 push(tmp5); 6537 6538 push(xlen); 6539 6540 const Register idx = tmp1; 6541 const Register kdx = tmp2; 6542 const Register xstart = tmp3; 6543 6544 const Register y_idx = tmp4; 6545 const Register carry = tmp5; 6546 const Register product = xlen; 6547 const Register x_xstart = tmp0; 6548 6549 // First Loop. 6550 // 6551 // final static long LONG_MASK = 0xffffffffL; 6552 // int xstart = xlen - 1; 6553 // int ystart = ylen - 1; 6554 // long carry = 0; 6555 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6556 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6557 // z[kdx] = (int)product; 6558 // carry = product >>> 32; 6559 // } 6560 // z[xstart] = (int)carry; 6561 // 6562 6563 movl(idx, ylen); // idx = ylen; 6564 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen; 6565 xorq(carry, carry); // carry = 0; 6566 6567 Label L_done; 6568 6569 movl(xstart, xlen); 6570 decrementl(xstart); 6571 jcc(Assembler::negative, L_done); 6572 6573 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6574 6575 Label L_second_loop; 6576 testl(kdx, kdx); 6577 jcc(Assembler::zero, L_second_loop); 6578 6579 Label L_carry; 6580 subl(kdx, 1); 6581 jcc(Assembler::zero, L_carry); 6582 6583 movl(Address(z, kdx, Address::times_4, 0), carry); 6584 shrq(carry, 32); 6585 subl(kdx, 1); 6586 6587 bind(L_carry); 6588 movl(Address(z, kdx, Address::times_4, 0), carry); 6589 6590 // Second and third (nested) loops. 
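  // The second loop walks x from xstart-1 down to 0; for each x[i] the third
  // loop re-scans all of y, accumulating into z with a running carry, as in
  // the Java sketch below.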
6591 // 6592 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6593 // carry = 0; 6594 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6595 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6596 // (z[k] & LONG_MASK) + carry; 6597 // z[k] = (int)product; 6598 // carry = product >>> 32; 6599 // } 6600 // z[i] = (int)carry; 6601 // } 6602 // 6603 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 6604 6605 const Register jdx = tmp1; 6606 6607 bind(L_second_loop); 6608 xorl(carry, carry); // carry = 0; 6609 movl(jdx, ylen); // j = ystart+1 6610 6611 subl(xstart, 1); // i = xstart-1; 6612 jcc(Assembler::negative, L_done); 6613 6614 push (z); 6615 6616 Label L_last_x; 6617 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 6618 subl(xstart, 1); // i = xstart-1; 6619 jcc(Assembler::negative, L_last_x); 6620 6621 if (UseBMI2Instructions) { 6622 movq(rdx, Address(x, xstart, Address::times_4, 0)); 6623 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 6624 } else { 6625 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6626 rorq(x_xstart, 32); // convert big-endian to little-endian 6627 } 6628 6629 Label L_third_loop_prologue; 6630 bind(L_third_loop_prologue); 6631 6632 push (x); 6633 push (xstart); 6634 push (ylen); 6635 6636 6637 if (UseBMI2Instructions) { 6638 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 6639 } else { // !UseBMI2Instructions 6640 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6641 } 6642 6643 pop(ylen); 6644 pop(xlen); 6645 pop(x); 6646 pop(z); 6647 6648 movl(tmp3, xlen); 6649 addl(tmp3, 1); 6650 movl(Address(z, tmp3, Address::times_4, 0), carry); 6651 subl(tmp3, 1); 6652 jccb(Assembler::negative, L_done); 6653 6654 shrq(carry, 32); 6655 movl(Address(z, tmp3, Address::times_4, 0), carry); 6656 jmp(L_second_loop); 6657 6658 // Next infrequent code is moved outside loops. 6659 bind(L_last_x); 6660 if (UseBMI2Instructions) { 6661 movl(rdx, Address(x, 0)); 6662 } else { 6663 movl(x_xstart, Address(x, 0)); 6664 } 6665 jmp(L_third_loop_prologue); 6666 6667 bind(L_done); 6668 6669 pop(xlen); 6670 6671 pop(tmp5); 6672 pop(tmp4); 6673 pop(tmp3); 6674 pop(tmp2); 6675 pop(tmp1); 6676 pop(tmp0); 6677 } 6678 6679 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 6680 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 6681 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 6682 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 6683 Label VECTOR8_TAIL, VECTOR4_TAIL; 6684 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 6685 Label SAME_TILL_END, DONE; 6686 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 6687 6688 //scale is in rcx in both Win64 and Unix 6689 ShortBranchVerifier sbv(this); 6690 6691 shlq(length); 6692 xorq(result, result); 6693 6694 if ((AVX3Threshold == 0) && (UseAVX > 2) && 6695 VM_Version::supports_avx512vlbw()) { 6696 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 6697 6698 cmpq(length, 64); 6699 jcc(Assembler::less, VECTOR32_TAIL); 6700 6701 movq(tmp1, length); 6702 andq(tmp1, 0x3F); // tail count 6703 andq(length, ~(0x3F)); //vector count 6704 6705 bind(VECTOR64_LOOP); 6706 // AVX512 code to compare 64 byte vectors. 
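// Roughly, per 64-byte step (illustrative scalar sketch of the masked
// compare; the opmask k7 gets one bit per byte, set where the bytes match):
//   for (int i = 0; i < 64; i++) { k7[i] = (obja[result + i] == objb[result + i]); }
// kortestql sets CF only when every bit of k7 is set, so a clear carry
// (aboveEqual) means at least one byte differed.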
6707 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 6708 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 6709 kortestql(k7, k7); 6710 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 6711 addq(result, 64); 6712 subq(length, 64); 6713 jccb(Assembler::notZero, VECTOR64_LOOP); 6714 6715 //bind(VECTOR64_TAIL); 6716 testq(tmp1, tmp1); 6717 jcc(Assembler::zero, SAME_TILL_END); 6718 6719 //bind(VECTOR64_TAIL); 6720 // AVX512 code to compare up to 63 byte vectors. 6721 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 6722 shlxq(tmp2, tmp2, tmp1); 6723 notq(tmp2); 6724 kmovql(k3, tmp2); 6725 6726 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 6727 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 6728 6729 ktestql(k7, k3); 6730 jcc(Assembler::below, SAME_TILL_END); // not mismatch 6731 6732 bind(VECTOR64_NOT_EQUAL); 6733 kmovql(tmp1, k7); 6734 notq(tmp1); 6735 tzcntq(tmp1, tmp1); 6736 addq(result, tmp1); 6737 shrq(result); 6738 jmp(DONE); 6739 bind(VECTOR32_TAIL); 6740 } 6741 6742 cmpq(length, 8); 6743 jcc(Assembler::equal, VECTOR8_LOOP); 6744 jcc(Assembler::less, VECTOR4_TAIL); 6745 6746 if (UseAVX >= 2) { 6747 Label VECTOR16_TAIL, VECTOR32_LOOP; 6748 6749 cmpq(length, 16); 6750 jcc(Assembler::equal, VECTOR16_LOOP); 6751 jcc(Assembler::less, VECTOR8_LOOP); 6752 6753 cmpq(length, 32); 6754 jccb(Assembler::less, VECTOR16_TAIL); 6755 6756 subq(length, 32); 6757 bind(VECTOR32_LOOP); 6758 vmovdqu(rymm0, Address(obja, result)); 6759 vmovdqu(rymm1, Address(objb, result)); 6760 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 6761 vptest(rymm2, rymm2); 6762 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 6763 addq(result, 32); 6764 subq(length, 32); 6765 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 6766 addq(length, 32); 6767 jcc(Assembler::equal, SAME_TILL_END); 6768 //falling through if less than 32 bytes left //close the branch here. 
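// The remaining 0..31 bytes are handled by a cascade of progressively
// narrower compares below: 16-byte (XMM), 8-byte (GPR), 4-byte, then a
// byte-by-byte loop; each stage falls through to the next when fewer bytes
// than its width are left.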
6769 6770 bind(VECTOR16_TAIL); 6771 cmpq(length, 16); 6772 jccb(Assembler::less, VECTOR8_TAIL); 6773 bind(VECTOR16_LOOP); 6774 movdqu(rymm0, Address(obja, result)); 6775 movdqu(rymm1, Address(objb, result)); 6776 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 6777 ptest(rymm2, rymm2); 6778 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6779 addq(result, 16); 6780 subq(length, 16); 6781 jcc(Assembler::equal, SAME_TILL_END); 6782 //falling through if less than 16 bytes left 6783 } else {//regular intrinsics 6784 6785 cmpq(length, 16); 6786 jccb(Assembler::less, VECTOR8_TAIL); 6787 6788 subq(length, 16); 6789 bind(VECTOR16_LOOP); 6790 movdqu(rymm0, Address(obja, result)); 6791 movdqu(rymm1, Address(objb, result)); 6792 pxor(rymm0, rymm1); 6793 ptest(rymm0, rymm0); 6794 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6795 addq(result, 16); 6796 subq(length, 16); 6797 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 6798 addq(length, 16); 6799 jcc(Assembler::equal, SAME_TILL_END); 6800 //falling through if less than 16 bytes left 6801 } 6802 6803 bind(VECTOR8_TAIL); 6804 cmpq(length, 8); 6805 jccb(Assembler::less, VECTOR4_TAIL); 6806 bind(VECTOR8_LOOP); 6807 movq(tmp1, Address(obja, result)); 6808 movq(tmp2, Address(objb, result)); 6809 xorq(tmp1, tmp2); 6810 testq(tmp1, tmp1); 6811 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 6812 addq(result, 8); 6813 subq(length, 8); 6814 jcc(Assembler::equal, SAME_TILL_END); 6815 //falling through if less than 8 bytes left 6816 6817 bind(VECTOR4_TAIL); 6818 cmpq(length, 4); 6819 jccb(Assembler::less, BYTES_TAIL); 6820 bind(VECTOR4_LOOP); 6821 movl(tmp1, Address(obja, result)); 6822 xorl(tmp1, Address(objb, result)); 6823 testl(tmp1, tmp1); 6824 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 6825 addq(result, 4); 6826 subq(length, 4); 6827 jcc(Assembler::equal, SAME_TILL_END); 6828 //falling through if less than 4 bytes left 6829 6830 bind(BYTES_TAIL); 6831 bind(BYTES_LOOP); 6832 load_unsigned_byte(tmp1, Address(obja, result)); 6833 load_unsigned_byte(tmp2, Address(objb, result)); 6834 xorl(tmp1, tmp2); 6835 testl(tmp1, tmp1); 6836 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6837 decq(length); 6838 jcc(Assembler::zero, SAME_TILL_END); 6839 incq(result); 6840 load_unsigned_byte(tmp1, Address(obja, result)); 6841 load_unsigned_byte(tmp2, Address(objb, result)); 6842 xorl(tmp1, tmp2); 6843 testl(tmp1, tmp1); 6844 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6845 decq(length); 6846 jcc(Assembler::zero, SAME_TILL_END); 6847 incq(result); 6848 load_unsigned_byte(tmp1, Address(obja, result)); 6849 load_unsigned_byte(tmp2, Address(objb, result)); 6850 xorl(tmp1, tmp2); 6851 testl(tmp1, tmp1); 6852 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6853 jmp(SAME_TILL_END); 6854 6855 if (UseAVX >= 2) { 6856 bind(VECTOR32_NOT_EQUAL); 6857 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 6858 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 6859 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 6860 vpmovmskb(tmp1, rymm0); 6861 bsfq(tmp1, tmp1); 6862 addq(result, tmp1); 6863 shrq(result); 6864 jmp(DONE); 6865 } 6866 6867 bind(VECTOR16_NOT_EQUAL); 6868 if (UseAVX >= 2) { 6869 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 6870 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 6871 pxor(rymm0, rymm2); 6872 } else { 6873 pcmpeqb(rymm2, rymm2); 6874 pxor(rymm0, rymm1); 6875 pcmpeqb(rymm0, rymm1); 6876 pxor(rymm0, rymm2); 6877 } 6878 pmovmskb(tmp1, rymm0); 6879 
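// tmp1 now holds one bit per byte, set at the positions where the two
// 16-byte chunks differ; bsf below picks the lowest set bit, i.e. the byte
// offset of the first mismatch, and the final shrq(result) (shift count in
// cl = log2_array_indxscale, see the note at the top of this function)
// converts the byte offset back into an element index.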
bsfq(tmp1, tmp1); 6880 addq(result, tmp1); 6881 shrq(result); 6882 jmpb(DONE); 6883 6884 bind(VECTOR8_NOT_EQUAL); 6885 bind(VECTOR4_NOT_EQUAL); 6886 bsfq(tmp1, tmp1); 6887 shrq(tmp1, 3); 6888 addq(result, tmp1); 6889 bind(BYTES_NOT_EQUAL); 6890 shrq(result); 6891 jmpb(DONE); 6892 6893 bind(SAME_TILL_END); 6894 mov64(result, -1); 6895 6896 bind(DONE); 6897 } 6898 6899 //Helper functions for square_to_len() 6900 6901 /** 6902 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 6903 * Preserves x and z and modifies rest of the registers. 6904 */ 6905 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 6906 // Perform square and right shift by 1 6907 // Handle odd xlen case first, then for even xlen do the following 6908 // jlong carry = 0; 6909 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 6910 // huge_128 product = x[j:j+1] * x[j:j+1]; 6911 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 6912 // z[i+2:i+3] = (jlong)(product >>> 1); 6913 // carry = (jlong)product; 6914 // } 6915 6916 xorq(tmp5, tmp5); // carry 6917 xorq(rdxReg, rdxReg); 6918 xorl(tmp1, tmp1); // index for x 6919 xorl(tmp4, tmp4); // index for z 6920 6921 Label L_first_loop, L_first_loop_exit; 6922 6923 testl(xlen, 1); 6924 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 6925 6926 // Square and right shift by 1 the odd element using 32 bit multiply 6927 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 6928 imulq(raxReg, raxReg); 6929 shrq(raxReg, 1); 6930 adcq(tmp5, 0); 6931 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 6932 incrementl(tmp1); 6933 addl(tmp4, 2); 6934 6935 // Square and right shift by 1 the rest using 64 bit multiply 6936 bind(L_first_loop); 6937 cmpptr(tmp1, xlen); 6938 jccb(Assembler::equal, L_first_loop_exit); 6939 6940 // Square 6941 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 6942 rorq(raxReg, 32); // convert big-endian to little-endian 6943 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 6944 6945 // Right shift by 1 and save carry 6946 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 6947 rcrq(rdxReg, 1); 6948 rcrq(raxReg, 1); 6949 adcq(tmp5, 0); 6950 6951 // Store result in z 6952 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 6953 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 6954 6955 // Update indices for x and z 6956 addl(tmp1, 2); 6957 addl(tmp4, 4); 6958 jmp(L_first_loop); 6959 6960 bind(L_first_loop_exit); 6961 } 6962 6963 6964 /** 6965 * Perform the following multiply add operation using BMI2 instructions 6966 * carry:sum = sum + op1*op2 + carry 6967 * op2 should be in rdx 6968 * op2 is preserved, all other registers are modified 6969 */ 6970 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 6971 // assert op2 is rdx 6972 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 6973 addq(sum, carry); 6974 adcq(tmp2, 0); 6975 addq(sum, op1); 6976 adcq(tmp2, 0); 6977 movq(carry, tmp2); 6978 } 6979 6980 /** 6981 * Perform the following multiply add operation: 6982 * carry:sum = sum + op1*op2 + carry 6983 * Preserves op1, op2 and modifies rest of registers 6984 */ 6985 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 6986 // rdx:rax = op1 * op2 6987 movq(raxReg, op2); 6988 mulq(op1); 6989 6990 // rdx:rax = sum + carry + rdx:rax 6991 addq(sum, carry); 6992 
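// Fold the carry-out of this low-word addition into the high half (rdx)
// before the low half of the product is added in.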
adcq(rdxReg, 0); 6993 addq(sum, raxReg); 6994 adcq(rdxReg, 0); 6995 6996 // carry:sum = rdx:sum 6997 movq(carry, rdxReg); 6998 } 6999 7000 /** 7001 * Add 64 bit long carry into z[] with carry propagation. 7002 * Preserves z and carry register values and modifies rest of registers. 7003 * 7004 */ 7005 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 7006 Label L_fourth_loop, L_fourth_loop_exit; 7007 7008 movl(tmp1, 1); 7009 subl(zlen, 2); 7010 addq(Address(z, zlen, Address::times_4, 0), carry); 7011 7012 bind(L_fourth_loop); 7013 jccb(Assembler::carryClear, L_fourth_loop_exit); 7014 subl(zlen, 2); 7015 jccb(Assembler::negative, L_fourth_loop_exit); 7016 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7017 jmp(L_fourth_loop); 7018 bind(L_fourth_loop_exit); 7019 } 7020 7021 /** 7022 * Shift z[] left by 1 bit. 7023 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7024 * 7025 */ 7026 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7027 7028 Label L_fifth_loop, L_fifth_loop_exit; 7029 7030 // Fifth loop 7031 // Perform primitiveLeftShift(z, zlen, 1) 7032 7033 const Register prev_carry = tmp1; 7034 const Register new_carry = tmp4; 7035 const Register value = tmp2; 7036 const Register zidx = tmp3; 7037 7038 // int zidx, carry; 7039 // long value; 7040 // carry = 0; 7041 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7042 // (carry:value) = (z[i] << 1) | carry ; 7043 // z[i] = value; 7044 // } 7045 7046 movl(zidx, zlen); 7047 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7048 7049 bind(L_fifth_loop); 7050 decl(zidx); // Use decl to preserve carry flag 7051 decl(zidx); 7052 jccb(Assembler::negative, L_fifth_loop_exit); 7053 7054 if (UseBMI2Instructions) { 7055 movq(value, Address(z, zidx, Address::times_4, 0)); 7056 rclq(value, 1); 7057 rorxq(value, value, 32); 7058 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7059 } 7060 else { 7061 // clear new_carry 7062 xorl(new_carry, new_carry); 7063 7064 // Shift z[i] by 1, or in previous carry and save new carry 7065 movq(value, Address(z, zidx, Address::times_4, 0)); 7066 shlq(value, 1); 7067 adcl(new_carry, 0); 7068 7069 orq(value, prev_carry); 7070 rorq(value, 0x20); 7071 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7072 7073 // Set previous carry = new carry 7074 movl(prev_carry, new_carry); 7075 } 7076 jmp(L_fifth_loop); 7077 7078 bind(L_fifth_loop_exit); 7079 } 7080 7081 7082 /** 7083 * Code for BigInteger::squareToLen() intrinsic 7084 * 7085 * rdi: x 7086 * rsi: len 7087 * r8: z 7088 * rcx: zlen 7089 * r12: tmp1 7090 * r13: tmp2 7091 * r14: tmp3 7092 * r15: tmp4 7093 * rbx: tmp5 7094 * 7095 */ 7096 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7097 7098 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7099 push(tmp1); 7100 push(tmp2); 7101 push(tmp3); 7102 push(tmp4); 7103 push(tmp5); 7104 7105 // First loop 7106 // Store the squares, right shifted one bit (i.e., divided by 2). 7107 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7108 7109 // Add in off-diagonal sums. 7110 // 7111 // Second, third (nested) and fourth loops. 
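// Each off-diagonal product x[i]*x[j] (i != j) occurs twice in the full
// square; it is accumulated only once here and doubled by the final left
// shift (fifth loop), which at the same time restores the diagonal squares
// that the first loop stored pre-shifted right by one bit.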
7112 // zlen +=2; 7113 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7114 // carry = 0; 7115 // long op2 = x[xidx:xidx+1]; 7116 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 7117 // k -= 2; 7118 // long op1 = x[j:j+1]; 7119 // long sum = z[k:k+1]; 7120 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 7121 // z[k:k+1] = sum; 7122 // } 7123 // add_one_64(z, k, carry, tmp_regs); 7124 // } 7125 7126 const Register carry = tmp5; 7127 const Register sum = tmp3; 7128 const Register op1 = tmp4; 7129 Register op2 = tmp2; 7130 7131 push(zlen); 7132 push(len); 7133 addl(zlen,2); 7134 bind(L_second_loop); 7135 xorq(carry, carry); 7136 subl(zlen, 4); 7137 subl(len, 2); 7138 push(zlen); 7139 push(len); 7140 cmpl(len, 0); 7141 jccb(Assembler::lessEqual, L_second_loop_exit); 7142 7143 // Multiply an array by one 64 bit long. 7144 if (UseBMI2Instructions) { 7145 op2 = rdxReg; 7146 movq(op2, Address(x, len, Address::times_4, 0)); 7147 rorxq(op2, op2, 32); 7148 } 7149 else { 7150 movq(op2, Address(x, len, Address::times_4, 0)); 7151 rorq(op2, 32); 7152 } 7153 7154 bind(L_third_loop); 7155 decrementl(len); 7156 jccb(Assembler::negative, L_third_loop_exit); 7157 decrementl(len); 7158 jccb(Assembler::negative, L_last_x); 7159 7160 movq(op1, Address(x, len, Address::times_4, 0)); 7161 rorq(op1, 32); 7162 7163 bind(L_multiply); 7164 subl(zlen, 2); 7165 movq(sum, Address(z, zlen, Address::times_4, 0)); 7166 7167 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 7168 if (UseBMI2Instructions) { 7169 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 7170 } 7171 else { 7172 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7173 } 7174 7175 movq(Address(z, zlen, Address::times_4, 0), sum); 7176 7177 jmp(L_third_loop); 7178 bind(L_third_loop_exit); 7179 7180 // Fourth loop 7181 // Add 64 bit long carry into z with carry propagation. 7182 // Uses offsetted zlen. 7183 add_one_64(z, zlen, carry, tmp1); 7184 7185 pop(len); 7186 pop(zlen); 7187 jmp(L_second_loop); 7188 7189 // Next infrequent code is moved outside loops. 7190 bind(L_last_x); 7191 movl(op1, Address(x, 0)); 7192 jmp(L_multiply); 7193 7194 bind(L_second_loop_exit); 7195 pop(len); 7196 pop(zlen); 7197 pop(len); 7198 pop(zlen); 7199 7200 // Fifth loop 7201 // Shift z left 1 bit. 7202 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 7203 7204 // z[zlen-1] |= x[len-1] & 1; 7205 movl(tmp3, Address(x, len, Address::times_4, -4)); 7206 andl(tmp3, 1); 7207 orl(Address(z, zlen, Address::times_4, -4), tmp3); 7208 7209 pop(tmp5); 7210 pop(tmp4); 7211 pop(tmp3); 7212 pop(tmp2); 7213 pop(tmp1); 7214 } 7215 7216 /** 7217 * Helper function for mul_add() 7218 * Multiply the in[] by int k and add to out[] starting at offset offs using 7219 * 128 bit by 32 bit multiply and return the carry in tmp5. 7220 * Only quad int aligned length of in[] is operated on in this function. 7221 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 7222 * This function preserves out, in and k registers. 7223 * len and offset point to the appropriate index in "in" & "out" correspondingly 7224 * tmp5 has the carry. 7225 * other registers are temporary and are modified. 
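 * Each iteration of the loop processes four ints (two 64-bit limbs) of
 * "in" and "out"; the 0-3 trailing ints are handled by the caller, mul_add().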
7226 * 7227 */ 7228 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7229 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7230 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7231 7232 Label L_first_loop, L_first_loop_exit; 7233 7234 movl(tmp1, len); 7235 shrl(tmp1, 2); 7236 7237 bind(L_first_loop); 7238 subl(tmp1, 1); 7239 jccb(Assembler::negative, L_first_loop_exit); 7240 7241 subl(len, 4); 7242 subl(offset, 4); 7243 7244 Register op2 = tmp2; 7245 const Register sum = tmp3; 7246 const Register op1 = tmp4; 7247 const Register carry = tmp5; 7248 7249 if (UseBMI2Instructions) { 7250 op2 = rdxReg; 7251 } 7252 7253 movq(op1, Address(in, len, Address::times_4, 8)); 7254 rorq(op1, 32); 7255 movq(sum, Address(out, offset, Address::times_4, 8)); 7256 rorq(sum, 32); 7257 if (UseBMI2Instructions) { 7258 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7259 } 7260 else { 7261 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7262 } 7263 // Store back in big endian from little endian 7264 rorq(sum, 0x20); 7265 movq(Address(out, offset, Address::times_4, 8), sum); 7266 7267 movq(op1, Address(in, len, Address::times_4, 0)); 7268 rorq(op1, 32); 7269 movq(sum, Address(out, offset, Address::times_4, 0)); 7270 rorq(sum, 32); 7271 if (UseBMI2Instructions) { 7272 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7273 } 7274 else { 7275 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7276 } 7277 // Store back in big endian from little endian 7278 rorq(sum, 0x20); 7279 movq(Address(out, offset, Address::times_4, 0), sum); 7280 7281 jmp(L_first_loop); 7282 bind(L_first_loop_exit); 7283 } 7284 7285 /** 7286 * Code for BigInteger::mulAdd() intrinsic 7287 * 7288 * rdi: out 7289 * rsi: in 7290 * r11: offs (out.length - offset) 7291 * rcx: len 7292 * r8: k 7293 * r12: tmp1 7294 * r13: tmp2 7295 * r14: tmp3 7296 * r15: tmp4 7297 * rbx: tmp5 7298 * Multiply the in[] by word k and add to out[], return the carry in rax 7299 */ 7300 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7301 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7302 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7303 7304 Label L_carry, L_last_in, L_done; 7305 7306 // carry = 0; 7307 // for (int j=len-1; j >= 0; j--) { 7308 // long product = (in[j] & LONG_MASK) * kLong + 7309 // (out[offs] & LONG_MASK) + carry; 7310 // out[offs--] = (int)product; 7311 // carry = product >>> 32; 7312 // } 7313 // 7314 push(tmp1); 7315 push(tmp2); 7316 push(tmp3); 7317 push(tmp4); 7318 push(tmp5); 7319 7320 Register op2 = tmp2; 7321 const Register sum = tmp3; 7322 const Register op1 = tmp4; 7323 const Register carry = tmp5; 7324 7325 if (UseBMI2Instructions) { 7326 op2 = rdxReg; 7327 movl(op2, k); 7328 } 7329 else { 7330 movl(op2, k); 7331 } 7332 7333 xorq(carry, carry); 7334 7335 //First loop 7336 7337 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7338 //The carry is in tmp5 7339 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7340 7341 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7342 decrementl(len); 7343 jccb(Assembler::negative, L_carry); 7344 decrementl(len); 7345 jccb(Assembler::negative, L_last_in); 7346 7347 movq(op1, Address(in, len, Address::times_4, 0)); 7348 rorq(op1, 32); 7349 7350 subl(offs, 2); 7351 movq(sum, Address(out, offs, Address::times_4, 0)); 7352 rorq(sum, 32); 7353 7354 if (UseBMI2Instructions) { 7355 
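// BMI2 path: the multiplier k was loaded into rdx (op2) above, which is
// where mulx expects it.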
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7356 } 7357 else { 7358 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7359 } 7360 7361 // Store back in big endian from little endian 7362 rorq(sum, 0x20); 7363 movq(Address(out, offs, Address::times_4, 0), sum); 7364 7365 testl(len, len); 7366 jccb(Assembler::zero, L_carry); 7367 7368 //Multiply the last in[] entry, if any 7369 bind(L_last_in); 7370 movl(op1, Address(in, 0)); 7371 movl(sum, Address(out, offs, Address::times_4, -4)); 7372 7373 movl(raxReg, k); 7374 mull(op1); //tmp4 * eax -> edx:eax 7375 addl(sum, carry); 7376 adcl(rdxReg, 0); 7377 addl(sum, raxReg); 7378 adcl(rdxReg, 0); 7379 movl(carry, rdxReg); 7380 7381 movl(Address(out, offs, Address::times_4, -4), sum); 7382 7383 bind(L_carry); 7384 //return tmp5/carry as carry in rax 7385 movl(rax, carry); 7386 7387 bind(L_done); 7388 pop(tmp5); 7389 pop(tmp4); 7390 pop(tmp3); 7391 pop(tmp2); 7392 pop(tmp1); 7393 } 7394 7395 /** 7396 * Emits code to update CRC-32 with a byte value according to constants in table 7397 * 7398 * @param [in,out]crc Register containing the crc. 7399 * @param [in]val Register containing the byte to fold into the CRC. 7400 * @param [in]table Register containing the table of crc constants. 7401 * 7402 * uint32_t crc; 7403 * val = crc_table[(val ^ crc) & 0xFF]; 7404 * crc = val ^ (crc >> 8); 7405 * 7406 */ 7407 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7408 xorl(val, crc); 7409 andl(val, 0xFF); 7410 shrl(crc, 8); // unsigned shift 7411 xorl(crc, Address(table, val, Address::times_4, 0)); 7412 } 7413 7414 /** 7415 * Fold 128-bit data chunk 7416 */ 7417 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7418 if (UseAVX > 0) { 7419 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7420 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7421 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7422 pxor(xcrc, xtmp); 7423 } else { 7424 movdqa(xtmp, xcrc); 7425 pclmulhdq(xtmp, xK); // [123:64] 7426 pclmulldq(xcrc, xK); // [63:0] 7427 pxor(xcrc, xtmp); 7428 movdqu(xtmp, Address(buf, offset)); 7429 pxor(xcrc, xtmp); 7430 } 7431 } 7432 7433 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7434 if (UseAVX > 0) { 7435 vpclmulhdq(xtmp, xK, xcrc); 7436 vpclmulldq(xcrc, xK, xcrc); 7437 pxor(xcrc, xbuf); 7438 pxor(xcrc, xtmp); 7439 } else { 7440 movdqa(xtmp, xcrc); 7441 pclmulhdq(xtmp, xK); 7442 pclmulldq(xcrc, xK); 7443 pxor(xcrc, xbuf); 7444 pxor(xcrc, xtmp); 7445 } 7446 } 7447 7448 /** 7449 * 8-bit folds to compute 32-bit CRC 7450 * 7451 * uint64_t xcrc; 7452 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7453 */ 7454 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7455 movdl(tmp, xcrc); 7456 andl(tmp, 0xFF); 7457 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7458 psrldq(xcrc, 1); // unsigned shift one byte 7459 pxor(xcrc, xtmp); 7460 } 7461 7462 /** 7463 * uint32_t crc; 7464 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7465 */ 7466 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7467 movl(tmp, crc); 7468 andl(tmp, 0xFF); 7469 shrl(crc, 8); 7470 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7471 } 7472 7473 /** 7474 * @param crc register containing existing CRC (32-bit) 7475 * @param buf register pointing to input byte buffer (byte*) 7476 * @param len register containing number of bytes 7477 * @param 
table register that will contain address of CRC table 7478 * @param tmp scratch register 7479 */ 7480 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7481 assert_different_registers(crc, buf, len, table, tmp, rax); 7482 7483 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7484 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7485 7486 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7487 // context for the registers used, where all instructions below are using 128-bit mode 7488 // On EVEX without VL and BW, these instructions will all be AVX. 7489 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7490 notl(crc); // ~crc 7491 cmpl(len, 16); 7492 jcc(Assembler::less, L_tail); 7493 7494 // Align buffer to 16 bytes 7495 movl(tmp, buf); 7496 andl(tmp, 0xF); 7497 jccb(Assembler::zero, L_aligned); 7498 subl(tmp, 16); 7499 addl(len, tmp); 7500 7501 align(4); 7502 BIND(L_align_loop); 7503 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7504 update_byte_crc32(crc, rax, table); 7505 increment(buf); 7506 incrementl(tmp); 7507 jccb(Assembler::less, L_align_loop); 7508 7509 BIND(L_aligned); 7510 movl(tmp, len); // save 7511 shrl(len, 4); 7512 jcc(Assembler::zero, L_tail_restore); 7513 7514 // Fold crc into first bytes of vector 7515 movdqa(xmm1, Address(buf, 0)); 7516 movdl(rax, xmm1); 7517 xorl(crc, rax); 7518 if (VM_Version::supports_sse4_1()) { 7519 pinsrd(xmm1, crc, 0); 7520 } else { 7521 pinsrw(xmm1, crc, 0); 7522 shrl(crc, 16); 7523 pinsrw(xmm1, crc, 1); 7524 } 7525 addptr(buf, 16); 7526 subl(len, 4); // len > 0 7527 jcc(Assembler::less, L_fold_tail); 7528 7529 movdqa(xmm2, Address(buf, 0)); 7530 movdqa(xmm3, Address(buf, 16)); 7531 movdqa(xmm4, Address(buf, 32)); 7532 addptr(buf, 48); 7533 subl(len, 3); 7534 jcc(Assembler::lessEqual, L_fold_512b); 7535 7536 // Fold total 512 bits of polynomial on each iteration, 7537 // 128 bits per each of 4 parallel streams. 7538 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 7539 7540 align32(); 7541 BIND(L_fold_512b_loop); 7542 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7543 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 7544 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 7545 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 7546 addptr(buf, 64); 7547 subl(len, 4); 7548 jcc(Assembler::greater, L_fold_512b_loop); 7549 7550 // Fold 512 bits to 128 bits. 7551 BIND(L_fold_512b); 7552 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7553 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 7554 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 7555 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 7556 7557 // Fold the rest of 128 bits data chunks 7558 BIND(L_fold_tail); 7559 addl(len, 3); 7560 jccb(Assembler::lessEqual, L_fold_128b); 7561 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7562 7563 BIND(L_fold_tail_loop); 7564 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7565 addptr(buf, 16); 7566 decrementl(len); 7567 jccb(Assembler::greater, L_fold_tail_loop); 7568 7569 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
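// The carry-less multiplies below do the bulk of the reduction of the
// 128-bit value in xmm1; the eight byte-at-a-time folds that follow
// (fold_8bit_crc32: crc = table[crc & 0xFF] ^ (crc >> 8), applied four
// times to xmm0 and four times to the crc register) shift out the last
// eight bytes, leaving the final 32-bit CRC.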
7570 BIND(L_fold_128b); 7571 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 7572 if (UseAVX > 0) { 7573 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 7574 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 7575 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 7576 } else { 7577 movdqa(xmm2, xmm0); 7578 pclmulqdq(xmm2, xmm1, 0x1); 7579 movdqa(xmm3, xmm0); 7580 pand(xmm3, xmm2); 7581 pclmulqdq(xmm0, xmm3, 0x1); 7582 } 7583 psrldq(xmm1, 8); 7584 psrldq(xmm2, 4); 7585 pxor(xmm0, xmm1); 7586 pxor(xmm0, xmm2); 7587 7588 // 8 8-bit folds to compute 32-bit CRC. 7589 for (int j = 0; j < 4; j++) { 7590 fold_8bit_crc32(xmm0, table, xmm1, rax); 7591 } 7592 movdl(crc, xmm0); // mov 32 bits to general register 7593 for (int j = 0; j < 4; j++) { 7594 fold_8bit_crc32(crc, table, rax); 7595 } 7596 7597 BIND(L_tail_restore); 7598 movl(len, tmp); // restore 7599 BIND(L_tail); 7600 andl(len, 0xf); 7601 jccb(Assembler::zero, L_exit); 7602 7603 // Fold the rest of bytes 7604 align(4); 7605 BIND(L_tail_loop); 7606 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7607 update_byte_crc32(crc, rax, table); 7608 increment(buf); 7609 decrementl(len); 7610 jccb(Assembler::greater, L_tail_loop); 7611 7612 BIND(L_exit); 7613 notl(crc); // ~c 7614 } 7615 7616 // Helper function for AVX 512 CRC32 7617 // Fold 512-bit data chunks 7618 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 7619 Register pos, int offset) { 7620 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 7621 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 7622 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 7623 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 7624 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 7625 } 7626 7627 // Helper function for AVX 512 CRC32 7628 // Compute CRC32 for < 256B buffers 7629 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 7630 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 7631 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 7632 7633 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 7634 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 7635 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 7636 7637 // check if there is enough buffer to be able to fold 16B at a time 7638 cmpl(len, 32); 7639 jcc(Assembler::less, L_less_than_32); 7640 7641 // if there is, load the constants 7642 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 7643 movdl(xmm0, crc); // get the initial crc value 7644 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7645 pxor(xmm7, xmm0); 7646 7647 // update the buffer pointer 7648 addl(pos, 16); 7649 //update the counter.subtract 32 instead of 16 to save one instruction from the loop 7650 subl(len, 32); 7651 jmp(L_16B_reduction_loop); 7652 7653 bind(L_less_than_32); 7654 //mov initial crc to the return value. this is necessary for zero - length buffers. 
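// (rax is the designated result register of this intrinsic; see the
// kernel_crc32_avx512 header comment.)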
7655 movl(rax, crc); 7656 testl(len, len); 7657 jcc(Assembler::equal, L_cleanup); 7658 7659 movdl(xmm0, crc); //get the initial crc value 7660 7661 cmpl(len, 16); 7662 jcc(Assembler::equal, L_exact_16_left); 7663 jcc(Assembler::less, L_less_than_16_left); 7664 7665 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7666 pxor(xmm7, xmm0); //xor the initial crc value 7667 addl(pos, 16); 7668 subl(len, 16); 7669 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 7670 jmp(L_get_last_two_xmms); 7671 7672 bind(L_less_than_16_left); 7673 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 7674 pxor(xmm1, xmm1); 7675 movptr(tmp1, rsp); 7676 movdqu(Address(tmp1, 0 * 16), xmm1); 7677 7678 cmpl(len, 4); 7679 jcc(Assembler::less, L_only_less_than_4); 7680 7681 //backup the counter value 7682 movl(tmp2, len); 7683 cmpl(len, 8); 7684 jcc(Assembler::less, L_less_than_8_left); 7685 7686 //load 8 Bytes 7687 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 7688 movq(Address(tmp1, 0 * 16), rax); 7689 addptr(tmp1, 8); 7690 subl(len, 8); 7691 addl(pos, 8); 7692 7693 bind(L_less_than_8_left); 7694 cmpl(len, 4); 7695 jcc(Assembler::less, L_less_than_4_left); 7696 7697 //load 4 Bytes 7698 movl(rax, Address(buf, pos, Address::times_1, 0)); 7699 movl(Address(tmp1, 0 * 16), rax); 7700 addptr(tmp1, 4); 7701 subl(len, 4); 7702 addl(pos, 4); 7703 7704 bind(L_less_than_4_left); 7705 cmpl(len, 2); 7706 jcc(Assembler::less, L_less_than_2_left); 7707 7708 // load 2 Bytes 7709 movw(rax, Address(buf, pos, Address::times_1, 0)); 7710 movl(Address(tmp1, 0 * 16), rax); 7711 addptr(tmp1, 2); 7712 subl(len, 2); 7713 addl(pos, 2); 7714 7715 bind(L_less_than_2_left); 7716 cmpl(len, 1); 7717 jcc(Assembler::less, L_zero_left); 7718 7719 // load 1 Byte 7720 movb(rax, Address(buf, pos, Address::times_1, 0)); 7721 movb(Address(tmp1, 0 * 16), rax); 7722 7723 bind(L_zero_left); 7724 movdqu(xmm7, Address(rsp, 0)); 7725 pxor(xmm7, xmm0); //xor the initial crc value 7726 7727 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7728 movdqu(xmm0, Address(rax, tmp2)); 7729 pshufb(xmm7, xmm0); 7730 jmp(L_128_done); 7731 7732 bind(L_exact_16_left); 7733 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 7734 pxor(xmm7, xmm0); //xor the initial crc value 7735 jmp(L_128_done); 7736 7737 bind(L_only_less_than_4); 7738 cmpl(len, 3); 7739 jcc(Assembler::less, L_only_less_than_3); 7740 7741 // load 3 Bytes 7742 movb(rax, Address(buf, pos, Address::times_1, 0)); 7743 movb(Address(tmp1, 0), rax); 7744 7745 movb(rax, Address(buf, pos, Address::times_1, 1)); 7746 movb(Address(tmp1, 1), rax); 7747 7748 movb(rax, Address(buf, pos, Address::times_1, 2)); 7749 movb(Address(tmp1, 2), rax); 7750 7751 movdqu(xmm7, Address(rsp, 0)); 7752 pxor(xmm7, xmm0); //xor the initial crc value 7753 7754 pslldq(xmm7, 0x5); 7755 jmp(L_barrett); 7756 bind(L_only_less_than_3); 7757 cmpl(len, 2); 7758 jcc(Assembler::less, L_only_less_than_2); 7759 7760 // load 2 Bytes 7761 movb(rax, Address(buf, pos, Address::times_1, 0)); 7762 movb(Address(tmp1, 0), rax); 7763 7764 movb(rax, Address(buf, pos, Address::times_1, 1)); 7765 movb(Address(tmp1, 1), rax); 7766 7767 movdqu(xmm7, Address(rsp, 0)); 7768 pxor(xmm7, xmm0); //xor the initial crc value 7769 7770 pslldq(xmm7, 0x6); 7771 jmp(L_barrett); 7772 7773 bind(L_only_less_than_2); 7774 //load 1 Byte 7775 movb(rax, Address(buf, pos, Address::times_1, 0)); 7776 movb(Address(tmp1, 0), rax); 7777 7778 movdqu(xmm7, Address(rsp, 
0)); 7779 pxor(xmm7, xmm0); //xor the initial crc value 7780 7781 pslldq(xmm7, 0x7); 7782 } 7783 7784 /** 7785 * Compute CRC32 using AVX512 instructions 7786 * param crc register containing existing CRC (32-bit) 7787 * param buf register pointing to input byte buffer (byte*) 7788 * param len register containing number of bytes 7789 * param table address of crc or crc32c table 7790 * param tmp1 scratch register 7791 * param tmp2 scratch register 7792 * return rax result register 7793 * 7794 * This routine is identical for crc32c with the exception of the precomputed constant 7795 * table which will be passed as the table argument. The calculation steps are 7796 * the same for both variants. 7797 */ 7798 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 7799 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 7800 7801 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7802 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7803 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 7804 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 7805 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 7806 7807 const Register pos = r12; 7808 push(r12); 7809 subptr(rsp, 16 * 2 + 8); 7810 7811 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7812 // context for the registers used, where all instructions below are using 128-bit mode 7813 // On EVEX without VL and BW, these instructions will all be AVX. 7814 movl(pos, 0); 7815 7816 // check if smaller than 256B 7817 cmpl(len, 256); 7818 jcc(Assembler::less, L_less_than_256); 7819 7820 // load the initial crc value 7821 movdl(xmm10, crc); 7822 7823 // receive the initial 64B data, xor the initial crc value 7824 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 7825 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 7826 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 7827 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 7828 7829 subl(len, 256); 7830 cmpl(len, 256); 7831 jcc(Assembler::less, L_fold_128_B_loop); 7832 7833 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 7834 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 7835 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 7836 subl(len, 256); 7837 7838 bind(L_fold_256_B_loop); 7839 addl(pos, 256); 7840 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 7841 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 7842 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 7843 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 7844 7845 subl(len, 256); 7846 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 7847 7848 // Fold 256 into 128 7849 addl(pos, 256); 7850 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 7851 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 7852 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 7853 7854 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 7855 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 7856 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 7857 7858 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 7859 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 7860 7861 addl(len, 128); 7862 jmp(L_fold_128_B_register); 7863 7864 // at this section of the code, there is 128 * x + y(0 <= y<128) bytes of buffer.The fold_128_B_loop 7865 // loop will fold 128B at a time until we have 128 + y Bytes of buffer 7866 7867 // fold 128B at a time.This section of the code folds 8 xmm registers in parallel 7868 bind(L_fold_128_B_loop); 7869 addl(pos, 128); 7870 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 7871 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 7872 7873 subl(len, 128); 7874 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 7875 7876 addl(pos, 128); 7877 7878 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128 7879 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 7880 bind(L_fold_128_B_register); 7881 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 7882 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 7883 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 7884 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 7885 // save last that has no multiplicand 7886 vextracti64x2(xmm7, xmm4, 3); 7887 7888 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 7889 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 7890 // Needed later in reduction loop 7891 movdqu(xmm10, Address(table, 1 * 16)); 7892 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 7893 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 7894 7895 // Swap 1,0,3,2 - 01 00 11 10 7896 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 7897 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 7898 vextracti128(xmm5, xmm8, 1); 7899 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 7900 7901 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 7902 // instead of a cmp instruction, we use the negative flag with the jl instruction 7903 addl(len, 128 - 16); 7904 jcc(Assembler::less, L_final_reduction_for_128); 7905 7906 bind(L_16B_reduction_loop); 7907 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7908 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7909 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7910 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 7911 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7912 addl(pos, 16); 7913 subl(len, 16); 7914 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 7915 7916 bind(L_final_reduction_for_128); 7917 addl(len, 16); 7918 jcc(Assembler::equal, L_128_done); 7919 7920 bind(L_get_last_two_xmms); 7921 movdqu(xmm2, xmm7); 7922 addl(pos, len); 7923 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 7924 subl(pos, len); 7925 7926 // get rid of the extra data that was loaded before 7927 // load the shift constant 7928 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7929 movdqu(xmm0, Address(rax, len)); 7930 addl(rax, len); 7931 7932 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7933 //Change mask to 512 7934 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 7935 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 7936 7937 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 7938 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7939 vpclmulqdq(xmm7, 
xmm7, xmm10, 0x10); 7940 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7941 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 7942 7943 bind(L_128_done); 7944 // compute crc of a 128-bit value 7945 movdqu(xmm10, Address(table, 3 * 16)); 7946 movdqu(xmm0, xmm7); 7947 7948 // 64b fold 7949 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 7950 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 7951 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7952 7953 // 32b fold 7954 movdqu(xmm0, xmm7); 7955 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 7956 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7957 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7958 jmp(L_barrett); 7959 7960 bind(L_less_than_256); 7961 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 7962 7963 //barrett reduction 7964 bind(L_barrett); 7965 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 7966 movdqu(xmm1, xmm7); 7967 movdqu(xmm2, xmm7); 7968 movdqu(xmm10, Address(table, 4 * 16)); 7969 7970 pclmulqdq(xmm7, xmm10, 0x0); 7971 pxor(xmm7, xmm2); 7972 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 7973 movdqu(xmm2, xmm7); 7974 pclmulqdq(xmm7, xmm10, 0x10); 7975 pxor(xmm7, xmm2); 7976 pxor(xmm7, xmm1); 7977 pextrd(crc, xmm7, 2); 7978 7979 bind(L_cleanup); 7980 addptr(rsp, 16 * 2 + 8); 7981 pop(r12); 7982 } 7983 7984 // S. Gueron / Information Processing Letters 112 (2012) 184 7985 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 7986 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 7987 // Output: the 64-bit carry-less product of B * CONST 7988 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 7989 Register tmp1, Register tmp2, Register tmp3) { 7990 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 7991 if (n > 0) { 7992 addq(tmp3, n * 256 * 8); 7993 } 7994 // Q1 = TABLEExt[n][B & 0xFF]; 7995 movl(tmp1, in); 7996 andl(tmp1, 0x000000FF); 7997 shll(tmp1, 3); 7998 addq(tmp1, tmp3); 7999 movq(tmp1, Address(tmp1, 0)); 8000 8001 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8002 movl(tmp2, in); 8003 shrl(tmp2, 8); 8004 andl(tmp2, 0x000000FF); 8005 shll(tmp2, 3); 8006 addq(tmp2, tmp3); 8007 movq(tmp2, Address(tmp2, 0)); 8008 8009 shlq(tmp2, 8); 8010 xorq(tmp1, tmp2); 8011 8012 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8013 movl(tmp2, in); 8014 shrl(tmp2, 16); 8015 andl(tmp2, 0x000000FF); 8016 shll(tmp2, 3); 8017 addq(tmp2, tmp3); 8018 movq(tmp2, Address(tmp2, 0)); 8019 8020 shlq(tmp2, 16); 8021 xorq(tmp1, tmp2); 8022 8023 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8024 shrl(in, 24); 8025 andl(in, 0x000000FF); 8026 shll(in, 3); 8027 addq(in, tmp3); 8028 movq(in, Address(in, 0)); 8029 8030 shlq(in, 24); 8031 xorq(in, tmp1); 8032 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8033 } 8034 8035 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8036 Register in_out, 8037 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8038 XMMRegister w_xtmp2, 8039 Register tmp1, 8040 Register n_tmp2, Register n_tmp3) { 8041 if (is_pclmulqdq_supported) { 8042 movdl(w_xtmp1, in_out); // modified blindly 8043 8044 movl(tmp1, const_or_pre_comp_const_index); 8045 movdl(w_xtmp2, tmp1); 8046 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8047 8048 movdq(in_out, w_xtmp1); 8049 } else { 8050 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8051 } 8052 } 8053 8054 // 
Recombination Alternative 2: No bit-reflections 8055 // T1 = (CRC_A * U1) << 1 8056 // T2 = (CRC_B * U2) << 1 8057 // C1 = T1 >> 32 8058 // C2 = T2 >> 32 8059 // T1 = T1 & 0xFFFFFFFF 8060 // T2 = T2 & 0xFFFFFFFF 8061 // T1 = CRC32(0, T1) 8062 // T2 = CRC32(0, T2) 8063 // C1 = C1 ^ T1 8064 // C2 = C2 ^ T2 8065 // CRC = C1 ^ C2 ^ CRC_C 8066 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8067 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8068 Register tmp1, Register tmp2, 8069 Register n_tmp3) { 8070 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8071 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8072 shlq(in_out, 1); 8073 movl(tmp1, in_out); 8074 shrq(in_out, 32); 8075 xorl(tmp2, tmp2); 8076 crc32(tmp2, tmp1, 4); 8077 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8078 shlq(in1, 1); 8079 movl(tmp1, in1); 8080 shrq(in1, 32); 8081 xorl(tmp2, tmp2); 8082 crc32(tmp2, tmp1, 4); 8083 xorl(in1, tmp2); 8084 xorl(in_out, in1); 8085 xorl(in_out, in2); 8086 } 8087 8088 // Set N to predefined value 8089 // Subtract from a length of a buffer 8090 // execute in a loop: 8091 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8092 // for i = 1 to N do 8093 // CRC_A = CRC32(CRC_A, A[i]) 8094 // CRC_B = CRC32(CRC_B, B[i]) 8095 // CRC_C = CRC32(CRC_C, C[i]) 8096 // end for 8097 // Recombine 8098 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8099 Register in_out1, Register in_out2, Register in_out3, 8100 Register tmp1, Register tmp2, Register tmp3, 8101 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8102 Register tmp4, Register tmp5, 8103 Register n_tmp6) { 8104 Label L_processPartitions; 8105 Label L_processPartition; 8106 Label L_exit; 8107 8108 bind(L_processPartitions); 8109 cmpl(in_out1, 3 * size); 8110 jcc(Assembler::less, L_exit); 8111 xorl(tmp1, tmp1); 8112 xorl(tmp2, tmp2); 8113 movq(tmp3, in_out2); 8114 addq(tmp3, size); 8115 8116 bind(L_processPartition); 8117 crc32(in_out3, Address(in_out2, 0), 8); 8118 crc32(tmp1, Address(in_out2, size), 8); 8119 crc32(tmp2, Address(in_out2, size * 2), 8); 8120 addq(in_out2, 8); 8121 cmpq(in_out2, tmp3); 8122 jcc(Assembler::less, L_processPartition); 8123 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8124 w_xtmp1, w_xtmp2, w_xtmp3, 8125 tmp4, tmp5, 8126 n_tmp6); 8127 addq(in_out2, 2 * size); 8128 subl(in_out1, 3 * size); 8129 jmp(L_processPartitions); 8130 8131 bind(L_exit); 8132 } 8133 8134 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8135 // Input: A buffer I of L bytes. 8136 // Output: the CRC32C value of the buffer. 8137 // Notations: 8138 // Write L = 24N + r, with N = floor (L/24). 8139 // r = L mod 24 (0 <= r < 24). 8140 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 8141 // N quadwords, and R consists of r bytes. 
8142 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 8143 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 8144 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 8145 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 8146 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8147 Register tmp1, Register tmp2, Register tmp3, 8148 Register tmp4, Register tmp5, Register tmp6, 8149 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8150 bool is_pclmulqdq_supported) { 8151 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8152 Label L_wordByWord; 8153 Label L_byteByByteProlog; 8154 Label L_byteByByte; 8155 Label L_exit; 8156 8157 if (is_pclmulqdq_supported ) { 8158 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr(); 8159 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1); 8160 8161 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2); 8162 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3); 8163 8164 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4); 8165 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5); 8166 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8167 } else { 8168 const_or_pre_comp_const_index[0] = 1; 8169 const_or_pre_comp_const_index[1] = 0; 8170 8171 const_or_pre_comp_const_index[2] = 3; 8172 const_or_pre_comp_const_index[3] = 2; 8173 8174 const_or_pre_comp_const_index[4] = 5; 8175 const_or_pre_comp_const_index[5] = 4; 8176 } 8177 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8178 in2, in1, in_out, 8179 tmp1, tmp2, tmp3, 8180 w_xtmp1, w_xtmp2, w_xtmp3, 8181 tmp4, tmp5, 8182 tmp6); 8183 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8184 in2, in1, in_out, 8185 tmp1, tmp2, tmp3, 8186 w_xtmp1, w_xtmp2, w_xtmp3, 8187 tmp4, tmp5, 8188 tmp6); 8189 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8190 in2, in1, in_out, 8191 tmp1, tmp2, tmp3, 8192 w_xtmp1, w_xtmp2, w_xtmp3, 8193 tmp4, tmp5, 8194 tmp6); 8195 movl(tmp1, in2); 8196 andl(tmp1, 0x00000007); 8197 negl(tmp1); 8198 addl(tmp1, in2); 8199 addq(tmp1, in1); 8200 8201 cmpq(in1, tmp1); 8202 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8203 align(16); 8204 BIND(L_wordByWord); 8205 crc32(in_out, Address(in1, 0), 8); 8206 addq(in1, 8); 8207 cmpq(in1, tmp1); 8208 jcc(Assembler::less, L_wordByWord); 8209 8210 BIND(L_byteByByteProlog); 8211 andl(in2, 0x00000007); 8212 movl(tmp2, 1); 8213 8214 cmpl(tmp2, in2); 8215 jccb(Assembler::greater, L_exit); 8216 BIND(L_byteByByte); 8217 crc32(in_out, Address(in1, 0), 1); 8218 incq(in1); 8219 incl(tmp2); 8220 cmpl(tmp2, in2); 8221 jcc(Assembler::lessEqual, L_byteByByte); 8222 8223 BIND(L_exit); 8224 } 8225 #undef BIND 8226 #undef BLOCK_COMMENT 8227 8228 // Compress char[] array to byte[]. 8229 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 8230 // Return the array length if every element in array can be encoded, 8231 // otherwise, the index of first non-latin1 (> 0xff) character. 
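// The Java-level reference implementation (from java.lang.StringUTF16) is: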
8232 // @IntrinsicCandidate 8233 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8234 // for (int i = 0; i < len; i++) { 8235 // char c = src[srcOff]; 8236 // if (c > 0xff) { 8237 // return i; // return index of non-latin1 char 8238 // } 8239 // dst[dstOff] = (byte)c; 8240 // srcOff++; 8241 // dstOff++; 8242 // } 8243 // return len; 8244 // } 8245 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8246 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8247 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8248 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8249 Label copy_chars_loop, done, reset_sp, copy_tail; 8250 8251 // rsi: src 8252 // rdi: dst 8253 // rdx: len 8254 // rcx: tmp5 8255 // rax: result 8256 8257 // rsi holds start addr of source char[] to be compressed 8258 // rdi holds start addr of destination byte[] 8259 // rdx holds length 8260 8261 assert(len != result, ""); 8262 8263 // save length for return 8264 movl(result, len); 8265 8266 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8267 VM_Version::supports_avx512vlbw() && 8268 VM_Version::supports_bmi2()) { 8269 8270 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 8271 8272 // alignment 8273 Label post_alignment; 8274 8275 // if length of the string is less than 32, handle it the old fashioned way 8276 testl(len, -32); 8277 jcc(Assembler::zero, below_threshold); 8278 8279 // First check whether a character is compressible ( <= 0xFF). 8280 // Create mask to test for Unicode chars inside zmm vector 8281 movl(tmp5, 0x00FF); 8282 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 8283 8284 testl(len, -64); 8285 jccb(Assembler::zero, post_alignment); 8286 8287 movl(tmp5, dst); 8288 andl(tmp5, (32 - 1)); 8289 negl(tmp5); 8290 andl(tmp5, (32 - 1)); 8291 8292 // bail out when there is nothing to be done 8293 testl(tmp5, 0xFFFFFFFF); 8294 jccb(Assembler::zero, post_alignment); 8295 8296 // ~(~0 << len), where len is the # of remaining elements to process 8297 movl(len, 0xFFFFFFFF); 8298 shlxl(len, len, tmp5); 8299 notl(len); 8300 kmovdl(mask2, len); 8301 movl(len, result); 8302 8303 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8304 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8305 ktestd(mask1, mask2); 8306 jcc(Assembler::carryClear, copy_tail); 8307 8308 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8309 8310 addptr(src, tmp5); 8311 addptr(src, tmp5); 8312 addptr(dst, tmp5); 8313 subl(len, tmp5); 8314 8315 bind(post_alignment); 8316 // end of alignment 8317 8318 movl(tmp5, len); 8319 andl(tmp5, (32 - 1)); // tail count (in chars) 8320 andl(len, ~(32 - 1)); // vector count (in chars) 8321 jccb(Assembler::zero, copy_loop_tail); 8322 8323 lea(src, Address(src, len, Address::times_2)); 8324 lea(dst, Address(dst, len, Address::times_1)); 8325 negptr(len); 8326 8327 bind(copy_32_loop); 8328 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 8329 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 8330 kortestdl(mask1, mask1); 8331 jccb(Assembler::carryClear, reset_for_copy_tail); 8332 8333 // All elements in current processed chunk are valid candidates for 8334 // compression. Write a truncated byte elements to the memory. 
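// evpmovwb truncates each 16-bit char in the chunk to its low byte and
// stores the resulting 32 bytes, i.e. roughly dst[i] = (byte)src[i] per lane.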
8335 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 8336 addptr(len, 32); 8337 jccb(Assembler::notZero, copy_32_loop); 8338 8339 bind(copy_loop_tail); 8340 // bail out when there is nothing to be done 8341 testl(tmp5, 0xFFFFFFFF); 8342 jcc(Assembler::zero, done); 8343 8344 movl(len, tmp5); 8345 8346 // ~(~0 << len), where len is the # of remaining elements to process 8347 movl(tmp5, 0xFFFFFFFF); 8348 shlxl(tmp5, tmp5, len); 8349 notl(tmp5); 8350 8351 kmovdl(mask2, tmp5); 8352 8353 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8354 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8355 ktestd(mask1, mask2); 8356 jcc(Assembler::carryClear, copy_tail); 8357 8358 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8359 jmp(done); 8360 8361 bind(reset_for_copy_tail); 8362 lea(src, Address(src, tmp5, Address::times_2)); 8363 lea(dst, Address(dst, tmp5, Address::times_1)); 8364 subptr(len, tmp5); 8365 jmp(copy_chars_loop); 8366 8367 bind(below_threshold); 8368 } 8369 8370 if (UseSSE42Intrinsics) { 8371 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 8372 8373 // vectored compression 8374 testl(len, 0xfffffff8); 8375 jcc(Assembler::zero, copy_tail); 8376 8377 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 8378 movdl(tmp1Reg, tmp5); 8379 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 8380 8381 andl(len, 0xfffffff0); 8382 jccb(Assembler::zero, copy_16); 8383 8384 // compress 16 chars per iter 8385 pxor(tmp4Reg, tmp4Reg); 8386 8387 lea(src, Address(src, len, Address::times_2)); 8388 lea(dst, Address(dst, len, Address::times_1)); 8389 negptr(len); 8390 8391 bind(copy_32_loop); 8392 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 8393 por(tmp4Reg, tmp2Reg); 8394 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 8395 por(tmp4Reg, tmp3Reg); 8396 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 8397 jccb(Assembler::notZero, reset_for_copy_tail); 8398 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 8399 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 8400 addptr(len, 16); 8401 jccb(Assembler::notZero, copy_32_loop); 8402 8403 // compress next vector of 8 chars (if any) 8404 bind(copy_16); 8405 // len = 0 8406 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 8407 jccb(Assembler::zero, copy_tail_sse); 8408 8409 pxor(tmp3Reg, tmp3Reg); 8410 8411 movdqu(tmp2Reg, Address(src, 0)); 8412 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 8413 jccb(Assembler::notZero, reset_for_copy_tail); 8414 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 8415 movq(Address(dst, 0), tmp2Reg); 8416 addptr(src, 16); 8417 addptr(dst, 8); 8418 jmpb(copy_tail_sse); 8419 8420 bind(reset_for_copy_tail); 8421 movl(tmp5, result); 8422 andl(tmp5, 0x0000000f); 8423 lea(src, Address(src, tmp5, Address::times_2)); 8424 lea(dst, Address(dst, tmp5, Address::times_1)); 8425 subptr(len, tmp5); 8426 jmpb(copy_chars_loop); 8427 8428 bind(copy_tail_sse); 8429 movl(len, result); 8430 andl(len, 0x00000007); // tail count (in chars) 8431 } 8432 // compress 1 char per iter 8433 bind(copy_tail); 8434 testl(len, len); 8435 jccb(Assembler::zero, done); 8436 lea(src, Address(src, len, Address::times_2)); 8437 lea(dst, Address(dst, len, Address::times_1)); 8438 negptr(len); 8439 8440 
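// Scalar tail setup: src and dst have been advanced past the remaining chars and len negated, so indexing with the (negative) len walks forward and the loop below exits when len reaches zero.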
bind(copy_chars_loop); 8441 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 8442 testl(tmp5, 0xff00); // check if Unicode char 8443 jccb(Assembler::notZero, reset_sp); 8444 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 8445 increment(len); 8446 jccb(Assembler::notZero, copy_chars_loop); 8447 8448 // add len then return (len will be zero if compress succeeded, otherwise negative) 8449 bind(reset_sp); 8450 addl(result, len); 8451 8452 bind(done); 8453 } 8454 8455 // Inflate byte[] array to char[]. 8456 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 8457 // @IntrinsicCandidate 8458 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 8459 // for (int i = 0; i < len; i++) { 8460 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 8461 // } 8462 // } 8463 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 8464 XMMRegister tmp1, Register tmp2, KRegister mask) { 8465 Label copy_chars_loop, done, below_threshold, avx3_threshold; 8466 // rsi: src 8467 // rdi: dst 8468 // rdx: len 8469 // rcx: tmp2 8470 8471 // rsi holds start addr of source byte[] to be inflated 8472 // rdi holds start addr of destination char[] 8473 // rdx holds length 8474 assert_different_registers(src, dst, len, tmp2); 8475 movl(tmp2, len); 8476 if ((UseAVX > 2) && // AVX512 8477 VM_Version::supports_avx512vlbw() && 8478 VM_Version::supports_bmi2()) { 8479 8480 Label copy_32_loop, copy_tail; 8481 Register tmp3_aliased = len; 8482 8483 // if length of the string is less than 16, handle it in an old fashioned way 8484 testl(len, -16); 8485 jcc(Assembler::zero, below_threshold); 8486 8487 testl(len, -1 * AVX3Threshold); 8488 jcc(Assembler::zero, avx3_threshold); 8489 8490 // In order to use only one arithmetic operation for the main loop we use 8491 // this pre-calculation 8492 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 8493 andl(len, -32); // vector count 8494 jccb(Assembler::zero, copy_tail); 8495 8496 lea(src, Address(src, len, Address::times_1)); 8497 lea(dst, Address(dst, len, Address::times_2)); 8498 negptr(len); 8499 8500 8501 // inflate 32 chars per iter 8502 bind(copy_32_loop); 8503 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 8504 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 8505 addptr(len, 32); 8506 jcc(Assembler::notZero, copy_32_loop); 8507 8508 bind(copy_tail); 8509 // bail out when there is nothing to be done 8510 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 8511 jcc(Assembler::zero, done); 8512 8513 // ~(~0 << length), where length is the # of remaining elements to process 8514 movl(tmp3_aliased, -1); 8515 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 8516 notl(tmp3_aliased); 8517 kmovdl(mask, tmp3_aliased); 8518 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 8519 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 8520 8521 jmp(done); 8522 bind(avx3_threshold); 8523 } 8524 if (UseSSE42Intrinsics) { 8525 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 8526 8527 if (UseAVX > 1) { 8528 andl(tmp2, (16 - 1)); 8529 andl(len, -16); 8530 jccb(Assembler::zero, copy_new_tail); 8531 } else { 8532 andl(tmp2, 0x00000007); // tail count (in chars) 8533 andl(len, 0xfffffff8); // vector count (in chars) 8534 jccb(Assembler::zero, copy_tail); 8535 } 8536 8537 // vectored inflation 8538 lea(src, Address(src, len, 
Address::times_1)); 8539 lea(dst, Address(dst, len, Address::times_2)); 8540 negptr(len); 8541 8542 if (UseAVX > 1) { 8543 bind(copy_16_loop); 8544 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 8545 vmovdqu(Address(dst, len, Address::times_2), tmp1); 8546 addptr(len, 16); 8547 jcc(Assembler::notZero, copy_16_loop); 8548 8549 bind(below_threshold); 8550 bind(copy_new_tail); 8551 movl(len, tmp2); 8552 andl(tmp2, 0x00000007); 8553 andl(len, 0xFFFFFFF8); 8554 jccb(Assembler::zero, copy_tail); 8555 8556 pmovzxbw(tmp1, Address(src, 0)); 8557 movdqu(Address(dst, 0), tmp1); 8558 addptr(src, 8); 8559 addptr(dst, 2 * 8); 8560 8561 jmp(copy_tail, true); 8562 } 8563 8564 // inflate 8 chars per iter 8565 bind(copy_8_loop); 8566 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 8567 movdqu(Address(dst, len, Address::times_2), tmp1); 8568 addptr(len, 8); 8569 jcc(Assembler::notZero, copy_8_loop); 8570 8571 bind(copy_tail); 8572 movl(len, tmp2); 8573 8574 cmpl(len, 4); 8575 jccb(Assembler::less, copy_bytes); 8576 8577 movdl(tmp1, Address(src, 0)); // load 4 byte chars 8578 pmovzxbw(tmp1, tmp1); 8579 movq(Address(dst, 0), tmp1); 8580 subptr(len, 4); 8581 addptr(src, 4); 8582 addptr(dst, 8); 8583 8584 bind(copy_bytes); 8585 } else { 8586 bind(below_threshold); 8587 } 8588 8589 testl(len, len); 8590 jccb(Assembler::zero, done); 8591 lea(src, Address(src, len, Address::times_1)); 8592 lea(dst, Address(dst, len, Address::times_2)); 8593 negptr(len); 8594 8595 // inflate 1 char per iter 8596 bind(copy_chars_loop); 8597 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 8598 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 8599 increment(len); 8600 jcc(Assembler::notZero, copy_chars_loop); 8601 8602 bind(done); 8603 } 8604 8605 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 8606 switch(type) { 8607 case T_BYTE: 8608 case T_BOOLEAN: 8609 evmovdqub(dst, kmask, src, merge, vector_len); 8610 break; 8611 case T_CHAR: 8612 case T_SHORT: 8613 evmovdquw(dst, kmask, src, merge, vector_len); 8614 break; 8615 case T_INT: 8616 case T_FLOAT: 8617 evmovdqul(dst, kmask, src, merge, vector_len); 8618 break; 8619 case T_LONG: 8620 case T_DOUBLE: 8621 evmovdquq(dst, kmask, src, merge, vector_len); 8622 break; 8623 default: 8624 fatal("Unexpected type argument %s", type2name(type)); 8625 break; 8626 } 8627 } 8628 8629 8630 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 8631 switch(type) { 8632 case T_BYTE: 8633 case T_BOOLEAN: 8634 evmovdqub(dst, kmask, src, merge, vector_len); 8635 break; 8636 case T_CHAR: 8637 case T_SHORT: 8638 evmovdquw(dst, kmask, src, merge, vector_len); 8639 break; 8640 case T_INT: 8641 case T_FLOAT: 8642 evmovdqul(dst, kmask, src, merge, vector_len); 8643 break; 8644 case T_LONG: 8645 case T_DOUBLE: 8646 evmovdquq(dst, kmask, src, merge, vector_len); 8647 break; 8648 default: 8649 fatal("Unexpected type argument %s", type2name(type)); 8650 break; 8651 } 8652 } 8653 8654 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 8655 switch(type) { 8656 case T_BYTE: 8657 case T_BOOLEAN: 8658 evmovdqub(dst, kmask, src, merge, vector_len); 8659 break; 8660 case T_CHAR: 8661 case T_SHORT: 8662 evmovdquw(dst, kmask, src, merge, vector_len); 8663 break; 8664 case T_INT: 8665 case 
T_FLOAT: 8666 evmovdqul(dst, kmask, src, merge, vector_len); 8667 break; 8668 case T_LONG: 8669 case T_DOUBLE: 8670 evmovdquq(dst, kmask, src, merge, vector_len); 8671 break; 8672 default: 8673 fatal("Unexpected type argument %s", type2name(type)); 8674 break; 8675 } 8676 } 8677 8678 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 8679 switch(masklen) { 8680 case 2: 8681 knotbl(dst, src); 8682 movl(rtmp, 3); 8683 kmovbl(ktmp, rtmp); 8684 kandbl(dst, ktmp, dst); 8685 break; 8686 case 4: 8687 knotbl(dst, src); 8688 movl(rtmp, 15); 8689 kmovbl(ktmp, rtmp); 8690 kandbl(dst, ktmp, dst); 8691 break; 8692 case 8: 8693 knotbl(dst, src); 8694 break; 8695 case 16: 8696 knotwl(dst, src); 8697 break; 8698 case 32: 8699 knotdl(dst, src); 8700 break; 8701 case 64: 8702 knotql(dst, src); 8703 break; 8704 default: 8705 fatal("Unexpected vector length %d", masklen); 8706 break; 8707 } 8708 } 8709 8710 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8711 switch(type) { 8712 case T_BOOLEAN: 8713 case T_BYTE: 8714 kandbl(dst, src1, src2); 8715 break; 8716 case T_CHAR: 8717 case T_SHORT: 8718 kandwl(dst, src1, src2); 8719 break; 8720 case T_INT: 8721 case T_FLOAT: 8722 kanddl(dst, src1, src2); 8723 break; 8724 case T_LONG: 8725 case T_DOUBLE: 8726 kandql(dst, src1, src2); 8727 break; 8728 default: 8729 fatal("Unexpected type argument %s", type2name(type)); 8730 break; 8731 } 8732 } 8733 8734 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8735 switch(type) { 8736 case T_BOOLEAN: 8737 case T_BYTE: 8738 korbl(dst, src1, src2); 8739 break; 8740 case T_CHAR: 8741 case T_SHORT: 8742 korwl(dst, src1, src2); 8743 break; 8744 case T_INT: 8745 case T_FLOAT: 8746 kordl(dst, src1, src2); 8747 break; 8748 case T_LONG: 8749 case T_DOUBLE: 8750 korql(dst, src1, src2); 8751 break; 8752 default: 8753 fatal("Unexpected type argument %s", type2name(type)); 8754 break; 8755 } 8756 } 8757 8758 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8759 switch(type) { 8760 case T_BOOLEAN: 8761 case T_BYTE: 8762 kxorbl(dst, src1, src2); 8763 break; 8764 case T_CHAR: 8765 case T_SHORT: 8766 kxorwl(dst, src1, src2); 8767 break; 8768 case T_INT: 8769 case T_FLOAT: 8770 kxordl(dst, src1, src2); 8771 break; 8772 case T_LONG: 8773 case T_DOUBLE: 8774 kxorql(dst, src1, src2); 8775 break; 8776 default: 8777 fatal("Unexpected type argument %s", type2name(type)); 8778 break; 8779 } 8780 } 8781 8782 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8783 switch(type) { 8784 case T_BOOLEAN: 8785 case T_BYTE: 8786 evpermb(dst, mask, nds, src, merge, vector_len); break; 8787 case T_CHAR: 8788 case T_SHORT: 8789 evpermw(dst, mask, nds, src, merge, vector_len); break; 8790 case T_INT: 8791 case T_FLOAT: 8792 evpermd(dst, mask, nds, src, merge, vector_len); break; 8793 case T_LONG: 8794 case T_DOUBLE: 8795 evpermq(dst, mask, nds, src, merge, vector_len); break; 8796 default: 8797 fatal("Unexpected type argument %s", type2name(type)); break; 8798 } 8799 } 8800 8801 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8802 switch(type) { 8803 case T_BOOLEAN: 8804 case T_BYTE: 8805 evpermb(dst, mask, nds, src, merge, vector_len); break; 8806 case T_CHAR: 8807 case T_SHORT: 8808 evpermw(dst, mask, nds, src, 
merge, vector_len); break; 8809 case T_INT: 8810 case T_FLOAT: 8811 evpermd(dst, mask, nds, src, merge, vector_len); break; 8812 case T_LONG: 8813 case T_DOUBLE: 8814 evpermq(dst, mask, nds, src, merge, vector_len); break; 8815 default: 8816 fatal("Unexpected type argument %s", type2name(type)); break; 8817 } 8818 } 8819 8820 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8821 switch(type) { 8822 case T_BYTE: 8823 evpminub(dst, mask, nds, src, merge, vector_len); break; 8824 case T_SHORT: 8825 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8826 case T_INT: 8827 evpminud(dst, mask, nds, src, merge, vector_len); break; 8828 case T_LONG: 8829 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8830 default: 8831 fatal("Unexpected type argument %s", type2name(type)); break; 8832 } 8833 } 8834 8835 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8836 switch(type) { 8837 case T_BYTE: 8838 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8839 case T_SHORT: 8840 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8841 case T_INT: 8842 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8843 case T_LONG: 8844 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8845 default: 8846 fatal("Unexpected type argument %s", type2name(type)); break; 8847 } 8848 } 8849 8850 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8851 switch(type) { 8852 case T_BYTE: 8853 evpminub(dst, mask, nds, src, merge, vector_len); break; 8854 case T_SHORT: 8855 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8856 case T_INT: 8857 evpminud(dst, mask, nds, src, merge, vector_len); break; 8858 case T_LONG: 8859 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8860 default: 8861 fatal("Unexpected type argument %s", type2name(type)); break; 8862 } 8863 } 8864 8865 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8866 switch(type) { 8867 case T_BYTE: 8868 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8869 case T_SHORT: 8870 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8871 case T_INT: 8872 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8873 case T_LONG: 8874 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8875 default: 8876 fatal("Unexpected type argument %s", type2name(type)); break; 8877 } 8878 } 8879 8880 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8881 switch(type) { 8882 case T_BYTE: 8883 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8884 case T_SHORT: 8885 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8886 case T_INT: 8887 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8888 case T_LONG: 8889 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8890 case T_FLOAT: 8891 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8892 case T_DOUBLE: 8893 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8894 default: 8895 fatal("Unexpected type argument %s", type2name(type)); break; 8896 } 8897 } 8898 8899 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister 
mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8900 switch(type) { 8901 case T_BYTE: 8902 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8903 case T_SHORT: 8904 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 8905 case T_INT: 8906 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8907 case T_LONG: 8908 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8909 case T_FLOAT: 8910 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8911 case T_DOUBLE: 8912 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8913 default: 8914 fatal("Unexpected type argument %s", type2name(type)); break; 8915 } 8916 } 8917 8918 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8919 switch(type) { 8920 case T_BYTE: 8921 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8922 case T_SHORT: 8923 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8924 case T_INT: 8925 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8926 case T_LONG: 8927 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8928 case T_FLOAT: 8929 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8930 case T_DOUBLE: 8931 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break; 8932 default: 8933 fatal("Unexpected type argument %s", type2name(type)); break; 8934 } 8935 } 8936 8937 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8938 switch(type) { 8939 case T_BYTE: 8940 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8941 case T_SHORT: 8942 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 8943 case T_INT: 8944 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8945 case T_LONG: 8946 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8947 case T_FLOAT: 8948 evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8949 case T_DOUBLE: 8950 evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break; 8951 default: 8952 fatal("Unexpected type argument %s", type2name(type)); break; 8953 } 8954 } 8955 8956 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8957 switch(type) { 8958 case T_INT: 8959 evpxord(dst, mask, nds, src, merge, vector_len); break; 8960 case T_LONG: 8961 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8962 default: 8963 fatal("Unexpected type argument %s", type2name(type)); break; 8964 } 8965 } 8966 8967 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8968 switch(type) { 8969 case T_INT: 8970 evpxord(dst, mask, nds, src, merge, vector_len); break; 8971 case T_LONG: 8972 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8973 default: 8974 fatal("Unexpected type argument %s", type2name(type)); break; 8975 } 8976 } 8977 8978 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8979 switch(type) { 8980 case T_INT: 8981 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8982 case T_LONG: 8983 evporq(dst, mask, nds, src, merge, vector_len); break; 8984 default:
8985 fatal("Unexpected type argument %s", type2name(type)); break; 8986 } 8987 } 8988 8989 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8990 switch(type) { 8991 case T_INT: 8992 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8993 case T_LONG: 8994 evporq(dst, mask, nds, src, merge, vector_len); break; 8995 default: 8996 fatal("Unexpected type argument %s", type2name(type)); break; 8997 } 8998 } 8999 9000 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9001 switch(type) { 9002 case T_INT: 9003 evpandd(dst, mask, nds, src, merge, vector_len); break; 9004 case T_LONG: 9005 evpandq(dst, mask, nds, src, merge, vector_len); break; 9006 default: 9007 fatal("Unexpected type argument %s", type2name(type)); break; 9008 } 9009 } 9010 9011 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9012 switch(type) { 9013 case T_INT: 9014 evpandd(dst, mask, nds, src, merge, vector_len); break; 9015 case T_LONG: 9016 evpandq(dst, mask, nds, src, merge, vector_len); break; 9017 default: 9018 fatal("Unexpected type argument %s", type2name(type)); break; 9019 } 9020 } 9021 9022 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 9023 switch(masklen) { 9024 case 8: 9025 kortestbl(src1, src2); 9026 break; 9027 case 16: 9028 kortestwl(src1, src2); 9029 break; 9030 case 32: 9031 kortestdl(src1, src2); 9032 break; 9033 case 64: 9034 kortestql(src1, src2); 9035 break; 9036 default: 9037 fatal("Unexpected mask length %d", masklen); 9038 break; 9039 } 9040 } 9041 9042 9043 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 9044 switch(masklen) { 9045 case 8: 9046 ktestbl(src1, src2); 9047 break; 9048 case 16: 9049 ktestwl(src1, src2); 9050 break; 9051 case 32: 9052 ktestdl(src1, src2); 9053 break; 9054 case 64: 9055 ktestql(src1, src2); 9056 break; 9057 default: 9058 fatal("Unexpected mask length %d", masklen); 9059 break; 9060 } 9061 } 9062 9063 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9064 switch(type) { 9065 case T_INT: 9066 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9067 case T_LONG: 9068 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9069 default: 9070 fatal("Unexpected type argument %s", type2name(type)); break; 9071 break; 9072 } 9073 } 9074 9075 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9076 switch(type) { 9077 case T_INT: 9078 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9079 case T_LONG: 9080 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9081 default: 9082 fatal("Unexpected type argument %s", type2name(type)); break; 9083 } 9084 } 9085 9086 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9087 switch(type) { 9088 case T_INT: 9089 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9090 case T_LONG: 9091 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9092 default: 9093 fatal("Unexpected type argument %s", type2name(type)); break; 9094 } 9095 } 9096 9097 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, 
XMMRegister src2, bool merge, int vlen_enc) { 9098 switch(type) { 9099 case T_INT: 9100 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9101 case T_LONG: 9102 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9103 default: 9104 fatal("Unexpected type argument %s", type2name(type)); break; 9105 } 9106 } 9107 9108 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9109 assert(rscratch != noreg || always_reachable(src), "missing"); 9110 9111 if (reachable(src)) { 9112 evpandq(dst, nds, as_Address(src), vector_len); 9113 } else { 9114 lea(rscratch, src); 9115 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9116 } 9117 } 9118 9119 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 9120 assert(rscratch != noreg || always_reachable(src), "missing"); 9121 9122 if (reachable(src)) { 9123 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 9124 } else { 9125 lea(rscratch, src); 9126 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 9127 } 9128 } 9129 9130 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9131 assert(rscratch != noreg || always_reachable(src), "missing"); 9132 9133 if (reachable(src)) { 9134 evporq(dst, nds, as_Address(src), vector_len); 9135 } else { 9136 lea(rscratch, src); 9137 evporq(dst, nds, Address(rscratch, 0), vector_len); 9138 } 9139 } 9140 9141 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9142 assert(rscratch != noreg || always_reachable(src), "missing"); 9143 9144 if (reachable(src)) { 9145 vpshufb(dst, nds, as_Address(src), vector_len); 9146 } else { 9147 lea(rscratch, src); 9148 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 9149 } 9150 } 9151 9152 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9153 assert(rscratch != noreg || always_reachable(src), "missing"); 9154 9155 if (reachable(src)) { 9156 Assembler::vpor(dst, nds, as_Address(src), vector_len); 9157 } else { 9158 lea(rscratch, src); 9159 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 9160 } 9161 } 9162 9163 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9164 assert(rscratch != noreg || always_reachable(src3), "missing"); 9165 9166 if (reachable(src3)) { 9167 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9168 } else { 9169 lea(rscratch, src3); 9170 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9171 } 9172 } 9173 9174 #if COMPILER2_OR_JVMCI 9175 9176 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9177 Register length, Register temp, int vec_enc) { 9178 // Computing mask for predicated vector store. 9179 movptr(temp, -1); 9180 bzhiq(temp, temp, length); 9181 kmov(mask, temp); 9182 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9183 } 9184 9185 // Set memory operation for length "less than" 64 bytes. 
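// Note: 'shift' is log2 of the element size (0 = byte, 1 = short, 2 = int), so byte counts are converted to element counts with expressions like '32 >> shift'; 'length' is counted in elements.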
9186 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9187 XMMRegister xmm, KRegister mask, Register length, 9188 Register temp, bool use64byteVector) { 9189 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9190 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9191 if (!use64byteVector) { 9192 fill32(dst, disp, xmm); 9193 subptr(length, 32 >> shift); 9194 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9195 } else { 9196 assert(MaxVectorSize == 64, "vector length != 64"); 9197 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9198 } 9199 } 9200 9201 9202 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9203 XMMRegister xmm, KRegister mask, Register length, 9204 Register temp) { 9205 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9206 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9207 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9208 } 9209 9210 9211 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9212 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9213 vmovdqu(dst, xmm); 9214 } 9215 9216 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9217 fill32(Address(dst, disp), xmm); 9218 } 9219 9220 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9221 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9222 if (!use64byteVector) { 9223 fill32(dst, xmm); 9224 fill32(dst.plus_disp(32), xmm); 9225 } else { 9226 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9227 } 9228 } 9229 9230 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9231 fill64(Address(dst, disp), xmm, use64byteVector); 9232 } 9233 9234 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9235 Register count, Register rtmp, XMMRegister xtmp) { 9236 Label L_exit; 9237 Label L_fill_start; 9238 Label L_fill_64_bytes; 9239 Label L_fill_96_bytes; 9240 Label L_fill_128_bytes; 9241 Label L_fill_128_bytes_loop; 9242 Label L_fill_128_loop_header; 9243 Label L_fill_128_bytes_loop_header; 9244 Label L_fill_128_bytes_loop_pre_header; 9245 Label L_fill_zmm_sequence; 9246 9247 int shift = -1; 9248 int avx3threshold = VM_Version::avx3_threshold(); 9249 switch(type) { 9250 case T_BYTE: shift = 0; 9251 break; 9252 case T_SHORT: shift = 1; 9253 break; 9254 case T_INT: shift = 2; 9255 break; 9256 /* Uncomment when LONG fill stubs are supported. 
9257 case T_LONG: shift = 3; 9258 break; 9259 */ 9260 default: 9261 fatal("Unhandled type: %s\n", type2name(type)); 9262 } 9263 9264 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9265 9266 if (MaxVectorSize == 64) { 9267 cmpq(count, avx3threshold >> shift); 9268 jcc(Assembler::greater, L_fill_zmm_sequence); 9269 } 9270 9271 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9272 9273 bind(L_fill_start); 9274 9275 cmpq(count, 32 >> shift); 9276 jccb(Assembler::greater, L_fill_64_bytes); 9277 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9278 jmp(L_exit); 9279 9280 bind(L_fill_64_bytes); 9281 cmpq(count, 64 >> shift); 9282 jccb(Assembler::greater, L_fill_96_bytes); 9283 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9284 jmp(L_exit); 9285 9286 bind(L_fill_96_bytes); 9287 cmpq(count, 96 >> shift); 9288 jccb(Assembler::greater, L_fill_128_bytes); 9289 fill64(to, 0, xtmp); 9290 subq(count, 64 >> shift); 9291 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9292 jmp(L_exit); 9293 9294 bind(L_fill_128_bytes); 9295 cmpq(count, 128 >> shift); 9296 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9297 fill64(to, 0, xtmp); 9298 fill32(to, 64, xtmp); 9299 subq(count, 96 >> shift); 9300 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9301 jmp(L_exit); 9302 9303 bind(L_fill_128_bytes_loop_pre_header); 9304 { 9305 mov(rtmp, to); 9306 andq(rtmp, 31); 9307 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9308 negq(rtmp); 9309 addq(rtmp, 32); 9310 mov64(r8, -1L); 9311 bzhiq(r8, r8, rtmp); 9312 kmovql(k2, r8); 9313 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9314 addq(to, rtmp); 9315 shrq(rtmp, shift); 9316 subq(count, rtmp); 9317 } 9318 9319 cmpq(count, 128 >> shift); 9320 jcc(Assembler::less, L_fill_start); 9321 9322 bind(L_fill_128_bytes_loop_header); 9323 subq(count, 128 >> shift); 9324 9325 align32(); 9326 bind(L_fill_128_bytes_loop); 9327 fill64(to, 0, xtmp); 9328 fill64(to, 64, xtmp); 9329 addq(to, 128); 9330 subq(count, 128 >> shift); 9331 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9332 9333 addq(count, 128 >> shift); 9334 jcc(Assembler::zero, L_exit); 9335 jmp(L_fill_start); 9336 } 9337 9338 if (MaxVectorSize == 64) { 9339 // Sequence using 64 byte ZMM register. 
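// Mirrors the 32-byte (YMM) sequence above: same structure, but with 64-byte ZMM stores, a 192-byte unrolled loop, and a 64-byte destination-alignment prologue.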
9340 Label L_fill_128_bytes_zmm; 9341 Label L_fill_192_bytes_zmm; 9342 Label L_fill_192_bytes_loop_zmm; 9343 Label L_fill_192_bytes_loop_header_zmm; 9344 Label L_fill_192_bytes_loop_pre_header_zmm; 9345 Label L_fill_start_zmm_sequence; 9346 9347 bind(L_fill_zmm_sequence); 9348 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9349 9350 bind(L_fill_start_zmm_sequence); 9351 cmpq(count, 64 >> shift); 9352 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9353 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9354 jmp(L_exit); 9355 9356 bind(L_fill_128_bytes_zmm); 9357 cmpq(count, 128 >> shift); 9358 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9359 fill64(to, 0, xtmp, true); 9360 subq(count, 64 >> shift); 9361 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9362 jmp(L_exit); 9363 9364 bind(L_fill_192_bytes_zmm); 9365 cmpq(count, 192 >> shift); 9366 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9367 fill64(to, 0, xtmp, true); 9368 fill64(to, 64, xtmp, true); 9369 subq(count, 128 >> shift); 9370 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9371 jmp(L_exit); 9372 9373 bind(L_fill_192_bytes_loop_pre_header_zmm); 9374 { 9375 movq(rtmp, to); 9376 andq(rtmp, 63); 9377 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9378 negq(rtmp); 9379 addq(rtmp, 64); 9380 mov64(r8, -1L); 9381 bzhiq(r8, r8, rtmp); 9382 kmovql(k2, r8); 9383 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9384 addq(to, rtmp); 9385 shrq(rtmp, shift); 9386 subq(count, rtmp); 9387 } 9388 9389 cmpq(count, 192 >> shift); 9390 jcc(Assembler::less, L_fill_start_zmm_sequence); 9391 9392 bind(L_fill_192_bytes_loop_header_zmm); 9393 subq(count, 192 >> shift); 9394 9395 align32(); 9396 bind(L_fill_192_bytes_loop_zmm); 9397 fill64(to, 0, xtmp, true); 9398 fill64(to, 64, xtmp, true); 9399 fill64(to, 128, xtmp, true); 9400 addq(to, 192); 9401 subq(count, 192 >> shift); 9402 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 9403 9404 addq(count, 192 >> shift); 9405 jcc(Assembler::zero, L_exit); 9406 jmp(L_fill_start_zmm_sequence); 9407 } 9408 bind(L_exit); 9409 } 9410 #endif //COMPILER2_OR_JVMCI 9411 9412 9413 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 9414 Label done; 9415 cvttss2sil(dst, src); 9416 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9417 cmpl(dst, 0x80000000); // float_sign_flip 9418 jccb(Assembler::notEqual, done); 9419 subptr(rsp, 8); 9420 movflt(Address(rsp, 0), src); 9421 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 9422 pop(dst); 9423 bind(done); 9424 } 9425 9426 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 9427 Label done; 9428 cvttsd2sil(dst, src); 9429 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9430 cmpl(dst, 0x80000000); // float_sign_flip 9431 jccb(Assembler::notEqual, done); 9432 subptr(rsp, 8); 9433 movdbl(Address(rsp, 0), src); 9434 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 9435 pop(dst); 9436 bind(done); 9437 } 9438 9439 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 9440 Label done; 9441 cvttss2siq(dst, src); 9442 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9443 jccb(Assembler::notEqual, done); 9444 subptr(rsp, 8); 9445 movflt(Address(rsp, 0), src); 9446 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup()))); 9447 pop(dst); 9448 
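// The fixup stub computes the JLS-correct result for NaN/overflow inputs and leaves it in the stack slot that pop(dst) just consumed.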
bind(done); 9449 } 9450 9451 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9452 // Following code is line by line assembly translation rounding algorithm. 9453 // Please refer to java.lang.Math.round(float) algorithm for details. 9454 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000; 9455 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24; 9456 const int32_t FloatConsts_EXP_BIAS = 127; 9457 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF; 9458 const int32_t MINUS_32 = 0xFFFFFFE0; 9459 Label L_special_case, L_block1, L_exit; 9460 movl(rtmp, FloatConsts_EXP_BIT_MASK); 9461 movdl(dst, src); 9462 andl(dst, rtmp); 9463 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1); 9464 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS); 9465 subl(rtmp, dst); 9466 movl(rcx, rtmp); 9467 movl(dst, MINUS_32); 9468 testl(rtmp, dst); 9469 jccb(Assembler::notEqual, L_special_case); 9470 movdl(dst, src); 9471 andl(dst, FloatConsts_SIGNIF_BIT_MASK); 9472 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1); 9473 movdl(rtmp, src); 9474 testl(rtmp, rtmp); 9475 jccb(Assembler::greaterEqual, L_block1); 9476 negl(dst); 9477 bind(L_block1); 9478 sarl(dst); 9479 addl(dst, 0x1); 9480 sarl(dst, 0x1); 9481 jmp(L_exit); 9482 bind(L_special_case); 9483 convert_f2i(dst, src); 9484 bind(L_exit); 9485 } 9486 9487 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9488 // Following code is line by line assembly translation rounding algorithm. 9489 // Please refer to java.lang.Math.round(double) algorithm for details. 9490 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L; 9491 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53; 9492 const int64_t DoubleConsts_EXP_BIAS = 1023; 9493 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL; 9494 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L; 9495 Label L_special_case, L_block1, L_exit; 9496 mov64(rtmp, DoubleConsts_EXP_BIT_MASK); 9497 movq(dst, src); 9498 andq(dst, rtmp); 9499 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1); 9500 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS); 9501 subq(rtmp, dst); 9502 movq(rcx, rtmp); 9503 mov64(dst, MINUS_64); 9504 testq(rtmp, dst); 9505 jccb(Assembler::notEqual, L_special_case); 9506 movq(dst, src); 9507 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK); 9508 andq(dst, rtmp); 9509 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1); 9510 orq(dst, rtmp); 9511 movq(rtmp, src); 9512 testq(rtmp, rtmp); 9513 jccb(Assembler::greaterEqual, L_block1); 9514 negq(dst); 9515 bind(L_block1); 9516 sarq(dst); 9517 addq(dst, 0x1); 9518 sarq(dst, 0x1); 9519 jmp(L_exit); 9520 bind(L_special_case); 9521 convert_d2l(dst, src); 9522 bind(L_exit); 9523 } 9524 9525 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) { 9526 Label done; 9527 cvttsd2siq(dst, src); 9528 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9529 jccb(Assembler::notEqual, done); 9530 subptr(rsp, 8); 9531 movdbl(Address(rsp, 0), src); 9532 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); 9533 pop(dst); 9534 bind(done); 9535 } 9536 9537 void MacroAssembler::cache_wb(Address line) 9538 { 9539 // 64 bit cpus always support clflush 9540 assert(VM_Version::supports_clflush(), "clflush should be available"); 9541 bool optimized = VM_Version::supports_clflushopt(); 9542 bool no_evict = VM_Version::supports_clwb(); 9543 9544 // prefer clwb (writeback without evict) otherwise 9545 // prefer clflushopt 
(potentially parallel writeback with evict) 9546 // otherwise fall back to clflush (serial writeback with evict) 9547 9548 if (optimized) { 9549 if (no_evict) { 9550 clwb(line); 9551 } else { 9552 clflushopt(line); 9553 } 9554 } else { 9555 // no need for fence when using CLFLUSH 9556 clflush(line); 9557 } 9558 } 9559 9560 void MacroAssembler::cache_wbsync(bool is_pre) 9561 { 9562 assert(VM_Version::supports_clflush(), "clflush should be available"); 9563 bool optimized = VM_Version::supports_clflushopt(); 9564 bool no_evict = VM_Version::supports_clwb(); 9565 9566 // pick the correct implementation 9567 9568 if (!is_pre && (optimized || no_evict)) { 9569 // need an sfence for post flush when using clflushopt or clwb 9570 // otherwise no need for any synchronization 9571 9572 sfence(); 9573 } 9574 } 9575 9576 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 9577 switch (cond) { 9578 // Note some conditions are synonyms for others 9579 case Assembler::zero: return Assembler::notZero; 9580 case Assembler::notZero: return Assembler::zero; 9581 case Assembler::less: return Assembler::greaterEqual; 9582 case Assembler::lessEqual: return Assembler::greater; 9583 case Assembler::greater: return Assembler::lessEqual; 9584 case Assembler::greaterEqual: return Assembler::less; 9585 case Assembler::below: return Assembler::aboveEqual; 9586 case Assembler::belowEqual: return Assembler::above; 9587 case Assembler::above: return Assembler::belowEqual; 9588 case Assembler::aboveEqual: return Assembler::below; 9589 case Assembler::overflow: return Assembler::noOverflow; 9590 case Assembler::noOverflow: return Assembler::overflow; 9591 case Assembler::negative: return Assembler::positive; 9592 case Assembler::positive: return Assembler::negative; 9593 case Assembler::parity: return Assembler::noParity; 9594 case Assembler::noParity: return Assembler::parity; 9595 } 9596 ShouldNotReachHere(); return Assembler::overflow; 9597 } 9598 9599 // This is simply a call to Thread::current() 9600 void MacroAssembler::get_thread_slow(Register thread) { 9601 if (thread != rax) { 9602 push(rax); 9603 } 9604 push(rdi); 9605 push(rsi); 9606 push(rdx); 9607 push(rcx); 9608 push(r8); 9609 push(r9); 9610 push(r10); 9611 push(r11); 9612 9613 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 9614 9615 pop(r11); 9616 pop(r10); 9617 pop(r9); 9618 pop(r8); 9619 pop(rcx); 9620 pop(rdx); 9621 pop(rsi); 9622 pop(rdi); 9623 if (thread != rax) { 9624 mov(thread, rax); 9625 pop(rax); 9626 } 9627 } 9628 9629 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) { 9630 Label L_stack_ok; 9631 if (bias == 0) { 9632 testptr(sp, 2 * wordSize - 1); 9633 } else { 9634 // lea(tmp, Address(rsp, bias)); 9635 mov(tmp, sp); 9636 addptr(tmp, bias); 9637 testptr(tmp, 2 * wordSize - 1); 9638 } 9639 jcc(Assembler::equal, L_stack_ok); 9640 block_comment(msg); 9641 stop(msg); 9642 bind(L_stack_ok); 9643 } 9644 9645 // Implements lightweight-locking.
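// Fast path: try to CAS the mark word lock bits from 0b01 (unlocked) to 0b00 and push obj onto the current thread's lock-stack; a recursive acquisition of the top-of-stack object just pushes again. Branches to 'slow' when the lock-stack is full, the object already has a monitor, or the CAS fails.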
9646 // 9647 // obj: the object to be locked 9648 // reg_rax: rax 9649 // thread: the thread which attempts to lock obj 9650 // tmp: a temporary register 9651 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) { 9652 Register thread = r15_thread; 9653 9654 assert(reg_rax == rax, ""); 9655 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); 9656 9657 Label push; 9658 const Register top = tmp; 9659 9660 // Preload the markWord. It is important that this is the first 9661 // instruction emitted as it is part of C1's null check semantics. 9662 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 9663 9664 if (UseObjectMonitorTable) { 9665 // Clear cache in case fast locking succeeds or we need to take the slow-path. 9666 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); 9667 } 9668 9669 if (DiagnoseSyncOnValueBasedClasses != 0) { 9670 load_klass(tmp, obj, rscratch1); 9671 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class); 9672 jcc(Assembler::notZero, slow); 9673 } 9674 9675 // Load top. 9676 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9677 9678 // Check if the lock-stack is full. 9679 cmpl(top, LockStack::end_offset()); 9680 jcc(Assembler::greaterEqual, slow); 9681 9682 // Check for recursion. 9683 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9684 jcc(Assembler::equal, push); 9685 9686 // Check header for monitor (0b10). 9687 testptr(reg_rax, markWord::monitor_value); 9688 jcc(Assembler::notZero, slow); 9689 9690 // Try to lock. Transition lock bits 0b01 => 0b00 9691 movptr(tmp, reg_rax); 9692 andptr(tmp, ~(int32_t)markWord::unlocked_value); 9693 orptr(reg_rax, markWord::unlocked_value); 9694 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9695 jcc(Assembler::notEqual, slow); 9696 9697 // Restore top, CAS clobbers register. 9698 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9699 9700 bind(push); 9701 // After successful lock, push object on lock-stack. 9702 movptr(Address(thread, top), obj); 9703 incrementl(top, oopSize); 9704 movl(Address(thread, JavaThread::lock_stack_top_offset()), top); 9705 } 9706 9707 // Implements lightweight-unlocking. 9708 // 9709 // obj: the object to be unlocked 9710 // reg_rax: rax 9711 // thread: the thread 9712 // tmp: a temporary register 9713 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) { 9714 Register thread = r15_thread; 9715 9716 assert(reg_rax == rax, ""); 9717 assert_different_registers(obj, reg_rax, thread, tmp); 9718 9719 Label unlocked, push_and_slow; 9720 const Register top = tmp; 9721 9722 // Check if obj is top of lock-stack. 9723 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9724 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9725 jcc(Assembler::notEqual, slow); 9726 9727 // Pop lock-stack. 9728 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);) 9729 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9730 9731 // Check if recursive. 9732 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize)); 9733 jcc(Assembler::equal, unlocked); 9734 9735 // Not recursive. Check header for monitor (0b10). 
9736 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 9737 testptr(reg_rax, markWord::monitor_value); 9738 jcc(Assembler::notZero, push_and_slow); 9739 9740 #ifdef ASSERT 9741 // Check header not unlocked (0b01). 9742 Label not_unlocked; 9743 testptr(reg_rax, markWord::unlocked_value); 9744 jcc(Assembler::zero, not_unlocked); 9745 stop("lightweight_unlock already unlocked"); 9746 bind(not_unlocked); 9747 #endif 9748 9749 // Try to unlock. Transition lock bits 0b00 => 0b01 9750 movptr(tmp, reg_rax); 9751 orptr(tmp, markWord::unlocked_value); 9752 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9753 jcc(Assembler::equal, unlocked); 9754 9755 bind(push_and_slow); 9756 // Restore lock-stack and handle the unlock in runtime. 9757 #ifdef ASSERT 9758 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9759 movptr(Address(thread, top), obj); 9760 #endif 9761 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9762 jmp(slow); 9763 9764 bind(unlocked); 9765 } 9766 9767 // Saves legacy GPRs state on stack. 9768 void MacroAssembler::save_legacy_gprs() { 9769 subq(rsp, 16 * wordSize); 9770 movq(Address(rsp, 15 * wordSize), rax); 9771 movq(Address(rsp, 14 * wordSize), rcx); 9772 movq(Address(rsp, 13 * wordSize), rdx); 9773 movq(Address(rsp, 12 * wordSize), rbx); 9774 movq(Address(rsp, 10 * wordSize), rbp); 9775 movq(Address(rsp, 9 * wordSize), rsi); 9776 movq(Address(rsp, 8 * wordSize), rdi); 9777 movq(Address(rsp, 7 * wordSize), r8); 9778 movq(Address(rsp, 6 * wordSize), r9); 9779 movq(Address(rsp, 5 * wordSize), r10); 9780 movq(Address(rsp, 4 * wordSize), r11); 9781 movq(Address(rsp, 3 * wordSize), r12); 9782 movq(Address(rsp, 2 * wordSize), r13); 9783 movq(Address(rsp, wordSize), r14); 9784 movq(Address(rsp, 0), r15); 9785 } 9786 9787 // Restores legacy GPRs state from stack. 9788 void MacroAssembler::restore_legacy_gprs() { 9789 movq(r15, Address(rsp, 0)); 9790 movq(r14, Address(rsp, wordSize)); 9791 movq(r13, Address(rsp, 2 * wordSize)); 9792 movq(r12, Address(rsp, 3 * wordSize)); 9793 movq(r11, Address(rsp, 4 * wordSize)); 9794 movq(r10, Address(rsp, 5 * wordSize)); 9795 movq(r9, Address(rsp, 6 * wordSize)); 9796 movq(r8, Address(rsp, 7 * wordSize)); 9797 movq(rdi, Address(rsp, 8 * wordSize)); 9798 movq(rsi, Address(rsp, 9 * wordSize)); 9799 movq(rbp, Address(rsp, 10 * wordSize)); 9800 movq(rbx, Address(rsp, 12 * wordSize)); 9801 movq(rdx, Address(rsp, 13 * wordSize)); 9802 movq(rcx, Address(rsp, 14 * wordSize)); 9803 movq(rax, Address(rsp, 15 * wordSize)); 9804 addq(rsp, 16 * wordSize); 9805 } 9806 9807 void MacroAssembler::load_aotrc_address(Register reg, address a) { 9808 #if INCLUDE_CDS 9809 assert(AOTRuntimeConstants::contains(a), "address out of range for data area"); 9810 if (AOTCodeCache::is_on_for_dump()) { 9811 // all aotrc field addresses should be registered in the AOTCodeCache address table 9812 lea(reg, ExternalAddress(a)); 9813 } else { 9814 mov64(reg, (uint64_t)a); 9815 } 9816 #else 9817 ShouldNotReachHere(); 9818 #endif 9819 } 9820 9821 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) { 9822 if (VM_Version::supports_apx_f()) { 9823 esetzucc(comparison, dst); 9824 } else { 9825 setb(comparison, dst); 9826 movzbl(dst, dst); 9827 } 9828 }
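// setcc usage note: after e.g. cmpl(lhs, rhs), setcc(Assembler::less, rax) materializes (lhs < rhs) ? 1 : 0 in rax; with APX, esetzucc zero-extends in a single instruction instead of setb + movzbl.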