/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */

};


// Implementation of MacroAssembler

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());

}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271.  The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
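
// Worked example of the special case handled above (Java long semantics,
// which this code implements): Long.MIN_VALUE / -1L overflows and wraps to
// Long.MIN_VALUE, with remainder 0. A raw idivq with that operand pair would
// raise a divide error (#DE), so the check branches around the instruction
// and leaves rax = min_long, rdx = 0 instead.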

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementq(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry, Register rscratch) {
  lea(rscratch, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) {
  lea(rscratch, adr);
  movptr(dst, rscratch);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) {
  mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(dst, src);
      movq(dst, Address(dst, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) {
  movq(as_Address(dst, rscratch), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src, dst /*rscratch*/));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) {
  if (is_simm32(src)) {
    movptr(dst, checked_cast<int32_t>(src));
  } else {
    mov64(rscratch, src);
    movq(dst, rscratch);
  }
}

void MacroAssembler::pushoop(jobject obj, Register rscratch) {
  movoop(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushklass(Metadata* obj, Register rscratch) {
  mov_metadata(rscratch, obj);
  push(rscratch);
}

void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) {
  lea(rscratch, src);
  if (src.is_lval()) {
    push(rscratch);
  } else {
    pushq(Address(rscratch, 0));
  }
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  // Skip AOT caching C strings in scratch buffer.
  const char* str = (code_section()->scratch_emit()) ? msg : AOTCodeCache::add_C_string(msg);
  lea(c_rarg0, ExternalAddress((address) str));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

#ifdef _WIN64
  // Windows always allocates space for its register args
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif
  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();          // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movq(rax, Address(rbp, reg2offset_in(src.first())));
      movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
void MacroAssembler::object_move(OopMap* map,
                                 int oop_handle_offset,
                                 int framesize_in_slots,
                                 VMRegPair src,
                                 VMRegPair dst,
                                 bool is_receiver,
                                 int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
    lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a null
    cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmpptr(rOop, NULL_WORD);
    lea(rHandle, Address(rsp, offset));
    // conditionally move a null from the handle area where it was just stored
    cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
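
// Net effect of object_move: a null oop is passed as a null handle (the cmov
// above replaces the just-computed slot address with the null value it read),
// while a non-null oop is passed as the address of the stack slot that holds
// it, which is what the native callee expects for object arguments.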

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  addq(dst, imm32);
}

void MacroAssembler::addptr(Register dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addptr(Address dst, Register src) {
  addq(dst, src);
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    addss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::addpd(dst, Address(rscratch, 0));
  }
}

// See 8273459. Function for ensuring 64-byte alignment, intended for stubs only.
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}
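
// Example: align(8, target) with target == 0x1d pads with 3 bytes of nop
// (8 - 0x1d % 8), so the next instruction starts at offset 0x20; if target
// is already a multiple of the modulus, nothing is emitted.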

void MacroAssembler::push_f(XMMRegister r) {
  subptr(rsp, wordSize);
  movflt(Address(rsp, 0), r);
}

void MacroAssembler::pop_f(XMMRegister r) {
  movflt(r, Address(rsp, 0));
  addptr(rsp, wordSize);
}

void MacroAssembler::push_d(XMMRegister r) {
  subptr(rsp, 2 * wordSize);
  movdbl(Address(rsp, 0), r);
}

void MacroAssembler::pop_d(XMMRegister r) {
  movdbl(r, Address(rsp, 0));
  addptr(rsp, 2 * Interpreter::stackElementSize);
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (UseAVX > 2 &&
      (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) &&
      (dst->encoding() >= 16)) {
    vpand(dst, dst, src, AVX_512bit, rscratch);
  } else if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  andq(dst, imm32);
}

void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
  }
}
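
// Example of the resulting access pattern: with a 4K page and size == 12K,
// the loop above stores at rsp-4K, rsp-8K and rsp-12K (tmp tracking
// downward), and the shadow loop then continues one page at a time below the
// last banged address, covering the whole shadow zone.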

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  cmpptr(rsp, Address(r15_thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), r15_thread);
  jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry, Register rscratch) {
  assert(rscratch != noreg || always_reachable(entry), "missing");

  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch, entry);
    Assembler::call(rscratch);
  }
}

void MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // Needs full 64-bit immediate for later patching.
  mov64(rax, (int64_t)Universe::non_oop_word());
  call(AddressLiteral(entry, rh));
}

int MacroAssembler::ic_check_size() {
  return UseCompactObjectHeaders ? 17 : 14;
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rax;
  Register temp = rscratch1;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(temp, receiver);
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else if (UseCompressedClassPointers) {
    movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
  } else {
    movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
  }

  // if inline cache check fails, then jump to runtime routine
  jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment);

  return uep_offset;
}

void MacroAssembler::emit_static_call_stub() {
  // Static stub relocation also tags the Method* in the code-stream.
  mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
  // This is recognized as unresolved by relocs/nativeinst/ic code.
  jump(RuntimeAddress(pc()));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2);

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  MacroAssembler::call_VM_base(oop_result, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  Register java_thread = r15_thread;

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, r15_thread);

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(last_java_sp, rbp, nullptr, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

#ifdef ASSERT
  // Check that thread register is not clobbered.
  guarantee(java_thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread_slow(rax);
    cmpptr(java_thread, rax);
    jcc(Assembler::equal, L);
    STOP("MacroAssembler::call_VM_base: java_thread not callee saved?");
    bind(L);
  }
  pop(rax);
#endif

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe();
  check_and_handle_earlyret();

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result_oop(oop_result);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  // Calculating the value for last_Java_sp is somewhat subtle.
  // call_VM does an intermediate call which places a return address on
  // the stack just under the stack pointer as the user finished with it.
  // This allows us to retrieve last_Java_pc from last_Java_sp[-1].

  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));

  call_VM_base(oop_result, rax, entry_point, number_of_arguments, check_exceptions);
}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_0, c_rarg1);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2);
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3);
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result_oop(Register oop_result) {
  movptr(oop_result, Address(r15_thread, JavaThread::vm_result_oop_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_metadata(Register metadata_result) {
  movptr(metadata_result, Address(r15_thread, JavaThread::vm_result_metadata_offset()));
  movptr(Address(r15_thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
}

void MacroAssembler::check_and_handle_earlyret() {
}

void MacroAssembler::check_and_handle_popframe() {
}

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpl(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    cmpl(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}

void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
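
// The two helpers above produce the -1/0/+1 result of the Java fcmpl/fcmpg
// and dcmpl/dcmpg bytecodes: ucomiss/ucomisd sets PF when either operand is
// NaN, so with unordered_is_less == true a NaN comparison yields -1
// (fcmpl/dcmpl), otherwise +1 (fcmpg/dcmpg).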

void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src1), "missing");

  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch, src1);
    cmpb(Address(rscratch, 0), imm);
  }
}

void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (src2.is_lval()) {
    movptr(rscratch, src2);
    Assembler::cmpq(src1, rscratch);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) {
  assert(src2.is_lval(), "not a mem-mem compare");
  // moves src2's literal address
  movptr(rscratch, src2);
  Assembler::cmpq(src1, rscratch);
}

void MacroAssembler::cmpoop(Register src1, Register src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, Address src2) {
  cmpptr(src1, src2);
}

void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
  movoop(rscratch, src2);
  cmpptr(src1, rscratch);
}

void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(adr), "missing");

  if (reachable(adr)) {
    lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch, adr);
    lock();
    cmpxchgptr(reg, Address(rscratch, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  cmpxchgq(reg, adr);
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comisd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comisd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::comiss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::comiss(dst, Address(rscratch, 0));
  }
}


void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  pushf(); // Preserve flags
  atomic_incl(counter_addr, rscratch);
  popf();
  bind(L);
}

int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_int
  //         reg: divisor   (may not be rax/rdx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_int
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}



void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) { subl(reg, value); return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) { subl(dst, value); return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}

void MacroAssembler::division_with_shift(Register reg, int shift_value) {
  assert(shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl(reg, reg);
  jcc(Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind(_is_positive);
  sarl(reg, shift_value);
}
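
// Example of the rounding fix above: Java integer division truncates toward
// zero, but an arithmetic shift alone rounds toward negative infinity. For
// reg = -7 and shift_value = 2, sarl alone would give -2; adding
// (1 << 2) - 1 = 3 first gives -4, and -4 >> 2 = -1, which matches -7 / 4.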

void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divsd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divsd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::divss(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::divss(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}

void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  emit_int8((uint8_t)0x0f);
  emit_int8((uint8_t)0x1f);
  emit_int8((uint8_t)0x84);
  emit_int8((uint8_t)0x00);
  emit_int32(0x00);
}

// A 5 byte nop that is safe for patching (see patch_verified_entry)
void MacroAssembler::fat_nop() {
  if (UseAddressNop) {
    addr_nop_5();
  } else {
    emit_int8((uint8_t)0x26); // es:
    emit_int8((uint8_t)0x2e); // cs:
    emit_int8((uint8_t)0x64); // fs:
    emit_int8((uint8_t)0x65); // gs:
    emit_int8((uint8_t)0x90);
  }
}

void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");
  if (reachable(src)) {
    Assembler::mulpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::mulpd(dst, Address(rscratch, 0));
  }
}

// dst = c = a * b + c
void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231sd(c, a, b);
  if (dst != c) {
    movdbl(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) {
  Assembler::vfmadd231ss(c, a, b);
  if (dst != c) {
    movflt(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231pd(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

// dst = c = a * b + c
void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) {
  Assembler::vfmadd231ps(c, a, b, vector_len);
  if (dst != c) {
    vmovdqu(dst, c);
  }
}

void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");

  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch, dst);
    incrementl(Address(rscratch, 0));
  }
}

void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) {
  incrementl(as_Address(dst, rscratch));
}

void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) { addl(reg, value); return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}

void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) { addl(dst, value); return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}

void MacroAssembler::jump(AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");
  assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump");
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch, dst);
    jmp(rscratch);
  }
}

void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) {
  assert(rscratch != noreg || always_reachable(dst), "missing");
  assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc");
  if (reachable(dst)) {
    InstructionMark im(this);
im(this); 1742 relocate(dst.reloc()); 1743 const int short_size = 2; 1744 const int long_size = 6; 1745 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 1746 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 1747 // 0111 tttn #8-bit disp 1748 emit_int8(0x70 | cc); 1749 emit_int8((offs - short_size) & 0xFF); 1750 } else { 1751 // 0000 1111 1000 tttn #32-bit disp 1752 emit_int8(0x0F); 1753 emit_int8((unsigned char)(0x80 | cc)); 1754 emit_int32(offs - long_size); 1755 } 1756 } else { 1757 #ifdef ASSERT 1758 warning("reversing conditional branch"); 1759 #endif /* ASSERT */ 1760 Label skip; 1761 jccb(reverse[cc], skip); 1762 lea(rscratch, dst); 1763 Assembler::jmp(rscratch); 1764 bind(skip); 1765 } 1766 } 1767 1768 void MacroAssembler::cmp32_mxcsr_std(Address mxcsr_save, Register tmp, Register rscratch) { 1769 ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std()); 1770 assert(rscratch != noreg || always_reachable(mxcsr_std), "missing"); 1771 1772 stmxcsr(mxcsr_save); 1773 movl(tmp, mxcsr_save); 1774 if (EnableX86ECoreOpts) { 1775 // The mxcsr_std has status bits set for performance on ECore 1776 orl(tmp, 0x003f); 1777 } else { 1778 // Mask out status bits (only check control and mask bits) 1779 andl(tmp, 0xFFC0); 1780 } 1781 cmp32(tmp, mxcsr_std, rscratch); 1782 } 1783 1784 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 1785 assert(rscratch != noreg || always_reachable(src), "missing"); 1786 1787 if (reachable(src)) { 1788 Assembler::ldmxcsr(as_Address(src)); 1789 } else { 1790 lea(rscratch, src); 1791 Assembler::ldmxcsr(Address(rscratch, 0)); 1792 } 1793 } 1794 1795 int MacroAssembler::load_signed_byte(Register dst, Address src) { 1796 int off = offset(); 1797 movsbl(dst, src); // movsxb 1798 return off; 1799 } 1800 1801 // Note: load_signed_short used to be called load_signed_word. 1802 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 1803 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 1804 // The term "word" in HotSpot means a 32- or 64-bit machine word. 1805 int MacroAssembler::load_signed_short(Register dst, Address src) { 1806 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 1807 // version but this is what 64bit has always done. This seems to imply 1808 // that users are only using 32bits worth. 1809 int off = offset(); 1810 movswl(dst, src); // movsxw 1811 return off; 1812 } 1813 1814 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 1815 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1816 // and "3.9 Partial Register Penalties", p. 22). 1817 int off = offset(); 1818 movzbl(dst, src); // movzxb 1819 return off; 1820 } 1821 1822 // Note: load_unsigned_short used to be called load_unsigned_word. 1823 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 1824 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 1825 // and "3.9 Partial Register Penalties", p. 22). 1826 int off = offset(); 1827 movzwl(dst, src); // movzxw 1828 return off; 1829 } 1830 1831 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 1832 switch (size_in_bytes) { 1833 case 8: movq(dst, src); break; 1834 case 4: movl(dst, src); break; 1835 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 1836 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 1837 default: ShouldNotReachHere(); 1838 } 1839 } 1840 1841 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 1842 switch (size_in_bytes) { 1843 case 8: movq(dst, src); break; 1844 case 4: movl(dst, src); break; 1845 case 2: movw(dst, src); break; 1846 case 1: movb(dst, src); break; 1847 default: ShouldNotReachHere(); 1848 } 1849 } 1850 1851 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 1852 assert(rscratch != noreg || always_reachable(dst), "missing"); 1853 1854 if (reachable(dst)) { 1855 movl(as_Address(dst), src); 1856 } else { 1857 lea(rscratch, dst); 1858 movl(Address(rscratch, 0), src); 1859 } 1860 } 1861 1862 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 1863 if (reachable(src)) { 1864 movl(dst, as_Address(src)); 1865 } else { 1866 lea(dst, src); 1867 movl(dst, Address(dst, 0)); 1868 } 1869 } 1870 1871 // C++ bool manipulation 1872 1873 void MacroAssembler::movbool(Register dst, Address src) { 1874 if(sizeof(bool) == 1) 1875 movb(dst, src); 1876 else if(sizeof(bool) == 2) 1877 movw(dst, src); 1878 else if(sizeof(bool) == 4) 1879 movl(dst, src); 1880 else 1881 // unsupported 1882 ShouldNotReachHere(); 1883 } 1884 1885 void MacroAssembler::movbool(Address dst, bool boolconst) { 1886 if(sizeof(bool) == 1) 1887 movb(dst, (int) boolconst); 1888 else if(sizeof(bool) == 2) 1889 movw(dst, (int) boolconst); 1890 else if(sizeof(bool) == 4) 1891 movl(dst, (int) boolconst); 1892 else 1893 // unsupported 1894 ShouldNotReachHere(); 1895 } 1896 1897 void MacroAssembler::movbool(Address dst, Register src) { 1898 if(sizeof(bool) == 1) 1899 movb(dst, src); 1900 else if(sizeof(bool) == 2) 1901 movw(dst, src); 1902 else if(sizeof(bool) == 4) 1903 movl(dst, src); 1904 else 1905 // unsupported 1906 ShouldNotReachHere(); 1907 } 1908 1909 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1910 assert(rscratch != noreg || always_reachable(src), "missing"); 1911 1912 if (reachable(src)) { 1913 movdl(dst, as_Address(src)); 1914 } else { 1915 lea(rscratch, src); 1916 movdl(dst, Address(rscratch, 0)); 1917 } 1918 } 1919 1920 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 1921 assert(rscratch != noreg || always_reachable(src), "missing"); 1922 1923 if (reachable(src)) { 1924 movq(dst, as_Address(src)); 1925 } else { 1926 lea(rscratch, src); 1927 movq(dst, Address(rscratch, 0)); 1928 } 1929 } 1930 1931 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 1932 assert(rscratch != noreg || always_reachable(src), "missing"); 1933 1934 if (reachable(src)) { 1935 if (UseXmmLoadAndClearUpper) { 1936 movsd (dst, as_Address(src)); 1937 } else { 1938 movlpd(dst, as_Address(src)); 1939 } 1940 } else { 1941 lea(rscratch, src); 1942 if (UseXmmLoadAndClearUpper) { 1943 movsd (dst, Address(rscratch, 0)); 1944 } else { 1945 movlpd(dst, Address(rscratch, 0)); 1946 } 1947 } 1948 } 1949 1950 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 1951 assert(rscratch != noreg || always_reachable(src), "missing"); 1952 1953 if (reachable(src)) { 1954 movss(dst, as_Address(src)); 1955 } else { 1956 lea(rscratch, src); 1957 movss(dst, Address(rscratch, 0)); 1958 } 1959 } 1960 1961 void MacroAssembler::movptr(Register dst, Register src) { 1962 movq(dst, src); 1963 } 1964 1965 void MacroAssembler::movptr(Register dst, Address src) { 
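  // Pointer-sized accessor family: on x86_64 a pointer is always 8 bytes, so
  // each movptr overload here simply expands to a quad-word move; the separate
  // name lets shared code stay word-size agnostic.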
1966 movq(dst, src); 1967 } 1968 1969 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 1970 void MacroAssembler::movptr(Register dst, intptr_t src) { 1971 if (is_uimm32(src)) { 1972 movl(dst, checked_cast<uint32_t>(src)); 1973 } else if (is_simm32(src)) { 1974 movq(dst, checked_cast<int32_t>(src)); 1975 } else { 1976 mov64(dst, src); 1977 } 1978 } 1979 1980 void MacroAssembler::movptr(Address dst, Register src) { 1981 movq(dst, src); 1982 } 1983 1984 void MacroAssembler::movptr(Address dst, int32_t src) { 1985 movslq(dst, src); 1986 } 1987 1988 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 1989 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1990 Assembler::movdqu(dst, src); 1991 } 1992 1993 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 1994 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 1995 Assembler::movdqu(dst, src); 1996 } 1997 1998 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 1999 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2000 Assembler::movdqu(dst, src); 2001 } 2002 2003 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2004 assert(rscratch != noreg || always_reachable(src), "missing"); 2005 2006 if (reachable(src)) { 2007 movdqu(dst, as_Address(src)); 2008 } else { 2009 lea(rscratch, src); 2010 movdqu(dst, Address(rscratch, 0)); 2011 } 2012 } 2013 2014 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2015 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2016 Assembler::vmovdqu(dst, src); 2017 } 2018 2019 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2020 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2021 Assembler::vmovdqu(dst, src); 2022 } 2023 2024 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2025 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2026 Assembler::vmovdqu(dst, src); 2027 } 2028 2029 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2030 assert(rscratch != noreg || always_reachable(src), "missing"); 2031 2032 if (reachable(src)) { 2033 vmovdqu(dst, as_Address(src)); 2034 } 2035 else { 2036 lea(rscratch, src); 2037 vmovdqu(dst, Address(rscratch, 0)); 2038 } 2039 } 2040 2041 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2042 assert(rscratch != noreg || always_reachable(src), "missing"); 2043 2044 if (vector_len == AVX_512bit) { 2045 evmovdquq(dst, src, AVX_512bit, rscratch); 2046 } else if (vector_len == AVX_256bit) { 2047 vmovdqu(dst, src, rscratch); 2048 } else { 2049 movdqu(dst, src, rscratch); 2050 } 2051 } 2052 2053 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src, int vector_len) { 2054 if (vector_len == AVX_512bit) { 2055 evmovdquq(dst, src, AVX_512bit); 2056 } else if (vector_len == AVX_256bit) { 2057 vmovdqu(dst, src); 2058 } else { 2059 movdqu(dst, src); 2060 } 2061 } 2062 2063 void MacroAssembler::vmovdqu(Address dst, XMMRegister src, int vector_len) { 2064 if (vector_len == AVX_512bit) { 2065 evmovdquq(dst, src, AVX_512bit); 2066 } else if (vector_len == AVX_256bit) { 2067 vmovdqu(dst, src); 2068 } else { 2069 movdqu(dst, src); 2070 } 2071 } 
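
// The vector_len-dispatching vmovdqu overloads above and below simply pick the
// widest encoding the caller asked for: evmovdquq for AVX_512bit, the VEX-encoded
// vmovdqu for AVX_256bit, and a 128-bit movdqu otherwise.  Purely as an
// illustration of the intended use (not a call site taken from this file):
//
//   vmovdqu(xmm0, xmm1, Assembler::AVX_256bit);  // full 256-bit ymm copy
//   vmovdqu(xmm0, xmm1, Assembler::AVX_128bit);  // low 128 bits only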
2072 2073 void MacroAssembler::vmovdqu(XMMRegister dst, Address src, int vector_len) { 2074 if (vector_len == AVX_512bit) { 2075 evmovdquq(dst, src, AVX_512bit); 2076 } else if (vector_len == AVX_256bit) { 2077 vmovdqu(dst, src); 2078 } else { 2079 movdqu(dst, src); 2080 } 2081 } 2082 2083 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2084 assert(rscratch != noreg || always_reachable(src), "missing"); 2085 2086 if (reachable(src)) { 2087 vmovdqa(dst, as_Address(src)); 2088 } 2089 else { 2090 lea(rscratch, src); 2091 vmovdqa(dst, Address(rscratch, 0)); 2092 } 2093 } 2094 2095 void MacroAssembler::vmovdqa(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2096 assert(rscratch != noreg || always_reachable(src), "missing"); 2097 2098 if (vector_len == AVX_512bit) { 2099 evmovdqaq(dst, src, AVX_512bit, rscratch); 2100 } else if (vector_len == AVX_256bit) { 2101 vmovdqa(dst, src, rscratch); 2102 } else { 2103 movdqa(dst, src, rscratch); 2104 } 2105 } 2106 2107 void MacroAssembler::kmov(KRegister dst, Address src) { 2108 if (VM_Version::supports_avx512bw()) { 2109 kmovql(dst, src); 2110 } else { 2111 assert(VM_Version::supports_evex(), ""); 2112 kmovwl(dst, src); 2113 } 2114 } 2115 2116 void MacroAssembler::kmov(Address dst, KRegister src) { 2117 if (VM_Version::supports_avx512bw()) { 2118 kmovql(dst, src); 2119 } else { 2120 assert(VM_Version::supports_evex(), ""); 2121 kmovwl(dst, src); 2122 } 2123 } 2124 2125 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2126 if (VM_Version::supports_avx512bw()) { 2127 kmovql(dst, src); 2128 } else { 2129 assert(VM_Version::supports_evex(), ""); 2130 kmovwl(dst, src); 2131 } 2132 } 2133 2134 void MacroAssembler::kmov(Register dst, KRegister src) { 2135 if (VM_Version::supports_avx512bw()) { 2136 kmovql(dst, src); 2137 } else { 2138 assert(VM_Version::supports_evex(), ""); 2139 kmovwl(dst, src); 2140 } 2141 } 2142 2143 void MacroAssembler::kmov(KRegister dst, Register src) { 2144 if (VM_Version::supports_avx512bw()) { 2145 kmovql(dst, src); 2146 } else { 2147 assert(VM_Version::supports_evex(), ""); 2148 kmovwl(dst, src); 2149 } 2150 } 2151 2152 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2153 assert(rscratch != noreg || always_reachable(src), "missing"); 2154 2155 if (reachable(src)) { 2156 kmovql(dst, as_Address(src)); 2157 } else { 2158 lea(rscratch, src); 2159 kmovql(dst, Address(rscratch, 0)); 2160 } 2161 } 2162 2163 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2164 assert(rscratch != noreg || always_reachable(src), "missing"); 2165 2166 if (reachable(src)) { 2167 kmovwl(dst, as_Address(src)); 2168 } else { 2169 lea(rscratch, src); 2170 kmovwl(dst, Address(rscratch, 0)); 2171 } 2172 } 2173 2174 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2175 int vector_len, Register rscratch) { 2176 assert(rscratch != noreg || always_reachable(src), "missing"); 2177 2178 if (reachable(src)) { 2179 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2180 } else { 2181 lea(rscratch, src); 2182 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2183 } 2184 } 2185 2186 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2187 int vector_len, Register rscratch) { 2188 assert(rscratch != noreg || always_reachable(src), "missing"); 2189 2190 if (reachable(src)) { 2191 Assembler::evmovdquw(dst, 
mask, as_Address(src), merge, vector_len); 2192 } else { 2193 lea(rscratch, src); 2194 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2195 } 2196 } 2197 2198 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2199 assert(rscratch != noreg || always_reachable(src), "missing"); 2200 2201 if (reachable(src)) { 2202 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2203 } else { 2204 lea(rscratch, src); 2205 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2206 } 2207 } 2208 2209 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2210 assert(rscratch != noreg || always_reachable(src), "missing"); 2211 2212 if (reachable(src)) { 2213 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2214 } else { 2215 lea(rscratch, src); 2216 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2217 } 2218 } 2219 2220 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2221 assert(rscratch != noreg || always_reachable(src), "missing"); 2222 2223 if (reachable(src)) { 2224 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2225 } else { 2226 lea(rscratch, src); 2227 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2228 } 2229 } 2230 2231 void MacroAssembler::evmovdqaq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2232 assert(rscratch != noreg || always_reachable(src), "missing"); 2233 2234 if (reachable(src)) { 2235 Assembler::evmovdqaq(dst, mask, as_Address(src), merge, vector_len); 2236 } else { 2237 lea(rscratch, src); 2238 Assembler::evmovdqaq(dst, mask, Address(rscratch, 0), merge, vector_len); 2239 } 2240 } 2241 2242 void MacroAssembler::evmovdqaq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2243 assert(rscratch != noreg || always_reachable(src), "missing"); 2244 2245 if (reachable(src)) { 2246 Assembler::evmovdqaq(dst, as_Address(src), vector_len); 2247 } else { 2248 lea(rscratch, src); 2249 Assembler::evmovdqaq(dst, Address(rscratch, 0), vector_len); 2250 } 2251 } 2252 2253 void MacroAssembler::movapd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2254 assert(rscratch != noreg || always_reachable(src), "missing"); 2255 2256 if (reachable(src)) { 2257 Assembler::movapd(dst, as_Address(src)); 2258 } else { 2259 lea(rscratch, src); 2260 Assembler::movapd(dst, Address(rscratch, 0)); 2261 } 2262 } 2263 2264 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2265 assert(rscratch != noreg || always_reachable(src), "missing"); 2266 2267 if (reachable(src)) { 2268 Assembler::movdqa(dst, as_Address(src)); 2269 } else { 2270 lea(rscratch, src); 2271 Assembler::movdqa(dst, Address(rscratch, 0)); 2272 } 2273 } 2274 2275 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2276 assert(rscratch != noreg || always_reachable(src), "missing"); 2277 2278 if (reachable(src)) { 2279 Assembler::movsd(dst, as_Address(src)); 2280 } else { 2281 lea(rscratch, src); 2282 Assembler::movsd(dst, Address(rscratch, 0)); 2283 } 2284 } 2285 2286 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2287 assert(rscratch != noreg || always_reachable(src), "missing"); 2288 2289 if (reachable(src)) { 2290 
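    // rip-relative case: the literal is within a 32-bit displacement of the
    // current code position and can be encoded directly; in the else branch the
    // full 64-bit address is first materialized into rscratch with lea and the
    // operand is then accessed through [rscratch + 0].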
Assembler::movss(dst, as_Address(src)); 2291 } else { 2292 lea(rscratch, src); 2293 Assembler::movss(dst, Address(rscratch, 0)); 2294 } 2295 } 2296 2297 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2298 assert(rscratch != noreg || always_reachable(src), "missing"); 2299 2300 if (reachable(src)) { 2301 Assembler::movddup(dst, as_Address(src)); 2302 } else { 2303 lea(rscratch, src); 2304 Assembler::movddup(dst, Address(rscratch, 0)); 2305 } 2306 } 2307 2308 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2309 assert(rscratch != noreg || always_reachable(src), "missing"); 2310 2311 if (reachable(src)) { 2312 Assembler::vmovddup(dst, as_Address(src), vector_len); 2313 } else { 2314 lea(rscratch, src); 2315 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2316 } 2317 } 2318 2319 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2320 assert(rscratch != noreg || always_reachable(src), "missing"); 2321 2322 if (reachable(src)) { 2323 Assembler::mulsd(dst, as_Address(src)); 2324 } else { 2325 lea(rscratch, src); 2326 Assembler::mulsd(dst, Address(rscratch, 0)); 2327 } 2328 } 2329 2330 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2331 assert(rscratch != noreg || always_reachable(src), "missing"); 2332 2333 if (reachable(src)) { 2334 Assembler::mulss(dst, as_Address(src)); 2335 } else { 2336 lea(rscratch, src); 2337 Assembler::mulss(dst, Address(rscratch, 0)); 2338 } 2339 } 2340 2341 void MacroAssembler::null_check(Register reg, int offset) { 2342 if (needs_explicit_null_check(offset)) { 2343 // provoke OS null exception if reg is null by 2344 // accessing M[reg] w/o changing any (non-CC) registers 2345 // NOTE: cmpl is plenty here to provoke a segv 2346 cmpptr(rax, Address(reg, 0)); 2347 // Note: should probably use testl(rax, Address(reg, 0)); 2348 // may be shorter code (however, this version of 2349 // testl needs to be implemented first) 2350 } else { 2351 // nothing to do, (later) access of M[reg + offset] 2352 // will provoke OS null exception if reg is null 2353 } 2354 } 2355 2356 void MacroAssembler::os_breakpoint() { 2357 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 2358 // (e.g., MSVC can't call ps() otherwise) 2359 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 2360 } 2361 2362 void MacroAssembler::unimplemented(const char* what) { 2363 const char* buf = nullptr; 2364 { 2365 ResourceMark rm; 2366 stringStream ss; 2367 ss.print("unimplemented: %s", what); 2368 buf = code_string(ss.as_string()); 2369 } 2370 stop(buf); 2371 } 2372 2373 #define XSTATE_BV 0x200 2374 2375 void MacroAssembler::pop_CPU_state() { 2376 pop_FPU_state(); 2377 pop_IU_state(); 2378 } 2379 2380 void MacroAssembler::pop_FPU_state() { 2381 fxrstor(Address(rsp, 0)); 2382 addptr(rsp, FPUStateSizeInWords * wordSize); 2383 } 2384 2385 void MacroAssembler::pop_IU_state() { 2386 popa(); 2387 addq(rsp, 8); 2388 popf(); 2389 } 2390 2391 // Save Integer and Float state 2392 // Warning: Stack must be 16 byte aligned (64bit) 2393 void MacroAssembler::push_CPU_state() { 2394 push_IU_state(); 2395 push_FPU_state(); 2396 } 2397 2398 void MacroAssembler::push_FPU_state() { 2399 subptr(rsp, FPUStateSizeInWords * wordSize); 2400 fxsave(Address(rsp, 0)); 2401 } 2402 2403 void MacroAssembler::push_IU_state() { 2404 // Push flags first because pusha kills them 2405 pushf(); 2406 // Make sure rsp 
stays 16-byte aligned
  subq(rsp, 8);
  pusha();
}

void MacroAssembler::push_cont_fastpath() {
  if (!Continuations::enabled()) return;

  Label L_done;
  cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
  jccb(Assembler::belowEqual, L_done);
  movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), rsp);
  bind(L_done);
}

void MacroAssembler::pop_cont_fastpath() {
  if (!Continuations::enabled()) return;

  Label L_done;
  cmpptr(rsp, Address(r15_thread, JavaThread::cont_fastpath_offset()));
  jccb(Assembler::below, L_done);
  movptr(Address(r15_thread, JavaThread::cont_fastpath_offset()), 0);
  bind(L_done);
}

void MacroAssembler::inc_held_monitor_count() {
  incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
}

void MacroAssembler::dec_held_monitor_count() {
  decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
}

#ifdef ASSERT
void MacroAssembler::stop_if_in_cont(Register cont, const char* name) {
  Label no_cont;
  movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset()));
  testl(cont, cont);
  jcc(Assembler::zero, no_cont);
  stop(name);
  bind(no_cont);
}
#endif

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }
  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  vzeroupper();
}

void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod) {
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll.
    // Therefore, we may safely use rsp instead to perform the stack watermark check.
    cmpptr(in_nmethod ? rsp : rbp, Address(r15_thread, JavaThread::polling_word_offset()));
    jcc(Assembler::above, slow_path);
    return;
  }
  testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
  jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
}

// Calls to C land
//
// When entering C land, the rbp and rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
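//
// Purely as an illustrative sketch (the names below are placeholders, not taken
// from any particular stub), a runtime call is typically bracketed like this:
//
//   set_last_Java_frame(noreg, rbp, the_pc, tmp);  // noreg => record rsp
//   call(RuntimeAddress(entry_point));             // enter C land
//   reset_last_Java_frame(true);                   // clear sp, fp and pc again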
2485 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2486 Register last_java_fp, 2487 address last_java_pc, 2488 Register rscratch) { 2489 vzeroupper(); 2490 // determine last_java_sp register 2491 if (!last_java_sp->is_valid()) { 2492 last_java_sp = rsp; 2493 } 2494 // last_java_fp is optional 2495 if (last_java_fp->is_valid()) { 2496 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 2497 } 2498 // last_java_pc is optional 2499 if (last_java_pc != nullptr) { 2500 Address java_pc(r15_thread, 2501 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 2502 lea(java_pc, InternalAddress(last_java_pc), rscratch); 2503 } 2504 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 2505 } 2506 2507 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 2508 Register last_java_fp, 2509 Label &L, 2510 Register scratch) { 2511 lea(scratch, L); 2512 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), scratch); 2513 set_last_Java_frame(last_java_sp, last_java_fp, nullptr, scratch); 2514 } 2515 2516 void MacroAssembler::shlptr(Register dst, int imm8) { 2517 shlq(dst, imm8); 2518 } 2519 2520 void MacroAssembler::shrptr(Register dst, int imm8) { 2521 shrq(dst, imm8); 2522 } 2523 2524 void MacroAssembler::sign_extend_byte(Register reg) { 2525 movsbl(reg, reg); // movsxb 2526 } 2527 2528 void MacroAssembler::sign_extend_short(Register reg) { 2529 movswl(reg, reg); // movsxw 2530 } 2531 2532 void MacroAssembler::testl(Address dst, int32_t imm32) { 2533 if (imm32 >= 0 && is8bit(imm32)) { 2534 testb(dst, imm32); 2535 } else { 2536 Assembler::testl(dst, imm32); 2537 } 2538 } 2539 2540 void MacroAssembler::testl(Register dst, int32_t imm32) { 2541 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 2542 testb(dst, imm32); 2543 } else { 2544 Assembler::testl(dst, imm32); 2545 } 2546 } 2547 2548 void MacroAssembler::testl(Register dst, AddressLiteral src) { 2549 assert(always_reachable(src), "Address should be reachable"); 2550 testl(dst, as_Address(src)); 2551 } 2552 2553 void MacroAssembler::testq(Address dst, int32_t imm32) { 2554 if (imm32 >= 0) { 2555 testl(dst, imm32); 2556 } else { 2557 Assembler::testq(dst, imm32); 2558 } 2559 } 2560 2561 void MacroAssembler::testq(Register dst, int32_t imm32) { 2562 if (imm32 >= 0) { 2563 testl(dst, imm32); 2564 } else { 2565 Assembler::testq(dst, imm32); 2566 } 2567 } 2568 2569 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 2570 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2571 Assembler::pcmpeqb(dst, src); 2572 } 2573 2574 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 2575 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2576 Assembler::pcmpeqw(dst, src); 2577 } 2578 2579 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 2580 assert((dst->encoding() < 16),"XMM register should be 0-15"); 2581 Assembler::pcmpestri(dst, src, imm8); 2582 } 2583 2584 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 2585 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2586 Assembler::pcmpestri(dst, src, imm8); 2587 } 2588 2589 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 2590 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM 
register should be 0-15"); 2591 Assembler::pmovzxbw(dst, src); 2592 } 2593 2594 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 2595 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2596 Assembler::pmovzxbw(dst, src); 2597 } 2598 2599 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 2600 assert((src->encoding() < 16),"XMM register should be 0-15"); 2601 Assembler::pmovmskb(dst, src); 2602 } 2603 2604 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 2605 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 2606 Assembler::ptest(dst, src); 2607 } 2608 2609 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2610 assert(rscratch != noreg || always_reachable(src), "missing"); 2611 2612 if (reachable(src)) { 2613 Assembler::sqrtss(dst, as_Address(src)); 2614 } else { 2615 lea(rscratch, src); 2616 Assembler::sqrtss(dst, Address(rscratch, 0)); 2617 } 2618 } 2619 2620 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2621 assert(rscratch != noreg || always_reachable(src), "missing"); 2622 2623 if (reachable(src)) { 2624 Assembler::subsd(dst, as_Address(src)); 2625 } else { 2626 lea(rscratch, src); 2627 Assembler::subsd(dst, Address(rscratch, 0)); 2628 } 2629 } 2630 2631 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 2632 assert(rscratch != noreg || always_reachable(src), "missing"); 2633 2634 if (reachable(src)) { 2635 Assembler::roundsd(dst, as_Address(src), rmode); 2636 } else { 2637 lea(rscratch, src); 2638 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 2639 } 2640 } 2641 2642 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2643 assert(rscratch != noreg || always_reachable(src), "missing"); 2644 2645 if (reachable(src)) { 2646 Assembler::subss(dst, as_Address(src)); 2647 } else { 2648 lea(rscratch, src); 2649 Assembler::subss(dst, Address(rscratch, 0)); 2650 } 2651 } 2652 2653 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2654 assert(rscratch != noreg || always_reachable(src), "missing"); 2655 2656 if (reachable(src)) { 2657 Assembler::ucomisd(dst, as_Address(src)); 2658 } else { 2659 lea(rscratch, src); 2660 Assembler::ucomisd(dst, Address(rscratch, 0)); 2661 } 2662 } 2663 2664 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2665 assert(rscratch != noreg || always_reachable(src), "missing"); 2666 2667 if (reachable(src)) { 2668 Assembler::ucomiss(dst, as_Address(src)); 2669 } else { 2670 lea(rscratch, src); 2671 Assembler::ucomiss(dst, Address(rscratch, 0)); 2672 } 2673 } 2674 2675 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2676 assert(rscratch != noreg || always_reachable(src), "missing"); 2677 2678 // Used in sign-bit flipping with aligned address. 
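  // (Bit 63 is the sign bit of an IEEE-754 double, so XORing with a constant
  // whose only set bit is bit 63 negates the value without touching exponent or
  // mantissa.  The alignment assert below reflects that the legacy SSE form of
  // xorpd requires a 16-byte-aligned memory operand; the VEX/AVX forms do not.)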
2679 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2680 2681 if (UseAVX > 2 && 2682 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2683 (dst->encoding() >= 16)) { 2684 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2685 } else if (reachable(src)) { 2686 Assembler::xorpd(dst, as_Address(src)); 2687 } else { 2688 lea(rscratch, src); 2689 Assembler::xorpd(dst, Address(rscratch, 0)); 2690 } 2691 } 2692 2693 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 2694 if (UseAVX > 2 && 2695 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2696 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2697 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2698 } else { 2699 Assembler::xorpd(dst, src); 2700 } 2701 } 2702 2703 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 2704 if (UseAVX > 2 && 2705 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2706 ((dst->encoding() >= 16) || (src->encoding() >= 16))) { 2707 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 2708 } else { 2709 Assembler::xorps(dst, src); 2710 } 2711 } 2712 2713 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 2714 assert(rscratch != noreg || always_reachable(src), "missing"); 2715 2716 // Used in sign-bit flipping with aligned address. 2717 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 2718 2719 if (UseAVX > 2 && 2720 (!VM_Version::supports_avx512dq() || !VM_Version::supports_avx512vl()) && 2721 (dst->encoding() >= 16)) { 2722 vpxor(dst, dst, src, Assembler::AVX_512bit, rscratch); 2723 } else if (reachable(src)) { 2724 Assembler::xorps(dst, as_Address(src)); 2725 } else { 2726 lea(rscratch, src); 2727 Assembler::xorps(dst, Address(rscratch, 0)); 2728 } 2729 } 2730 2731 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 2732 assert(rscratch != noreg || always_reachable(src), "missing"); 2733 2734 // Used in sign-bit flipping with aligned address. 
2735 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 2736 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 2737 if (reachable(src)) { 2738 Assembler::pshufb(dst, as_Address(src)); 2739 } else { 2740 lea(rscratch, src); 2741 Assembler::pshufb(dst, Address(rscratch, 0)); 2742 } 2743 } 2744 2745 // AVX 3-operands instructions 2746 2747 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2748 assert(rscratch != noreg || always_reachable(src), "missing"); 2749 2750 if (reachable(src)) { 2751 vaddsd(dst, nds, as_Address(src)); 2752 } else { 2753 lea(rscratch, src); 2754 vaddsd(dst, nds, Address(rscratch, 0)); 2755 } 2756 } 2757 2758 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 2759 assert(rscratch != noreg || always_reachable(src), "missing"); 2760 2761 if (reachable(src)) { 2762 vaddss(dst, nds, as_Address(src)); 2763 } else { 2764 lea(rscratch, src); 2765 vaddss(dst, nds, Address(rscratch, 0)); 2766 } 2767 } 2768 2769 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2770 assert(UseAVX > 0, "requires some form of AVX"); 2771 assert(rscratch != noreg || always_reachable(src), "missing"); 2772 2773 if (reachable(src)) { 2774 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 2775 } else { 2776 lea(rscratch, src); 2777 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 2778 } 2779 } 2780 2781 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2782 assert(UseAVX > 0, "requires some form of AVX"); 2783 assert(rscratch != noreg || always_reachable(src), "missing"); 2784 2785 if (reachable(src)) { 2786 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 2787 } else { 2788 lea(rscratch, src); 2789 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 2790 } 2791 } 2792 2793 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2794 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2795 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2796 2797 vandps(dst, nds, negate_field, vector_len, rscratch); 2798 } 2799 2800 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 2801 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 2802 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 2803 2804 vandpd(dst, nds, negate_field, vector_len, rscratch); 2805 } 2806 2807 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2808 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2809 Assembler::vpaddb(dst, nds, src, vector_len); 2810 } 2811 2812 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2813 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2814 Assembler::vpaddb(dst, nds, src, vector_len); 2815 } 2816 2817 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2818 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2819 Assembler::vpaddw(dst, nds, src, vector_len); 2820 } 2821 2822 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2823 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2824 Assembler::vpaddw(dst, nds, src, vector_len); 2825 } 2826 2827 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2828 assert(rscratch != noreg || always_reachable(src), "missing"); 2829 2830 if (reachable(src)) { 2831 Assembler::vpand(dst, nds, as_Address(src), vector_len); 2832 } else { 2833 lea(rscratch, src); 2834 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 2835 } 2836 } 2837 2838 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2839 assert(rscratch != noreg || always_reachable(src), "missing"); 2840 2841 if (reachable(src)) { 2842 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 2843 } else { 2844 lea(rscratch, src); 2845 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 2846 } 2847 } 2848 2849 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2850 assert(rscratch != noreg || always_reachable(src), "missing"); 2851 2852 if (reachable(src)) { 2853 Assembler::vbroadcasti128(dst, as_Address(src), vector_len); 2854 } else { 2855 lea(rscratch, src); 2856 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len); 2857 } 2858 } 2859 2860 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2861 assert(rscratch != noreg || always_reachable(src), "missing"); 2862 2863 if (reachable(src)) { 2864 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 2865 } else { 2866 lea(rscratch, src); 2867 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 2868 } 2869 } 2870 2871 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2872 assert(rscratch != noreg || always_reachable(src), "missing"); 2873 2874 if (reachable(src)) { 2875 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 2876 } else { 2877 lea(rscratch, src); 2878 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 2879 } 2880 } 2881 2882 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2883 assert(rscratch != noreg || always_reachable(src), "missing"); 2884 2885 if (reachable(src)) { 2886 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 2887 } else { 2888 lea(rscratch, src); 2889 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 2890 } 2891 } 2892 2893 // Vector float blend 2894 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2895 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2896 // WARN: Allow dst == (src1|src2), mask == scratch 2897 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2898 bool scratch_available = scratch != xnoreg 
&& scratch != src1 && scratch != src2 && scratch != dst; 2899 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2900 if (blend_emulation && scratch_available && dst_available) { 2901 if (compute_mask) { 2902 vpsrad(scratch, mask, 32, vector_len); 2903 mask = scratch; 2904 } 2905 if (dst == src1) { 2906 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1 2907 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2908 } else { 2909 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2910 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1 2911 } 2912 vpor(dst, dst, scratch, vector_len); 2913 } else { 2914 Assembler::vblendvps(dst, src1, src2, mask, vector_len); 2915 } 2916 } 2917 2918 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 2919 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 2920 // WARN: Allow dst == (src1|src2), mask == scratch 2921 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 2922 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask); 2923 bool dst_available = dst != mask && (dst != src1 || dst != src2); 2924 if (blend_emulation && scratch_available && dst_available) { 2925 if (compute_mask) { 2926 vpxor(scratch, scratch, scratch, vector_len); 2927 vpcmpgtq(scratch, scratch, mask, vector_len); 2928 mask = scratch; 2929 } 2930 if (dst == src1) { 2931 vpandn(dst, mask, src1, vector_len); // if mask == 0, src 2932 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 2933 } else { 2934 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 2935 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src 2936 } 2937 vpor(dst, dst, scratch, vector_len); 2938 } else { 2939 Assembler::vblendvpd(dst, src1, src2, mask, vector_len); 2940 } 2941 } 2942 2943 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2944 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2945 Assembler::vpcmpeqb(dst, nds, src, vector_len); 2946 } 2947 2948 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 2949 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2950 Assembler::vpcmpeqb(dst, src1, src2, vector_len); 2951 } 2952 2953 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 2954 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2955 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2956 } 2957 2958 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 2959 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 2960 Assembler::vpcmpeqw(dst, nds, src, vector_len); 2961 } 2962 2963 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 2964 assert(rscratch != noreg || always_reachable(src), "missing"); 2965 2966 if (reachable(src)) { 2967 
Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 2968 } else { 2969 lea(rscratch, src); 2970 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 2971 } 2972 } 2973 2974 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2975 int comparison, bool is_signed, int vector_len, Register rscratch) { 2976 assert(rscratch != noreg || always_reachable(src), "missing"); 2977 2978 if (reachable(src)) { 2979 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2980 } else { 2981 lea(rscratch, src); 2982 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2983 } 2984 } 2985 2986 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2987 int comparison, bool is_signed, int vector_len, Register rscratch) { 2988 assert(rscratch != noreg || always_reachable(src), "missing"); 2989 2990 if (reachable(src)) { 2991 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 2992 } else { 2993 lea(rscratch, src); 2994 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 2995 } 2996 } 2997 2998 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 2999 int comparison, bool is_signed, int vector_len, Register rscratch) { 3000 assert(rscratch != noreg || always_reachable(src), "missing"); 3001 3002 if (reachable(src)) { 3003 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3004 } else { 3005 lea(rscratch, src); 3006 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3007 } 3008 } 3009 3010 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3011 int comparison, bool is_signed, int vector_len, Register rscratch) { 3012 assert(rscratch != noreg || always_reachable(src), "missing"); 3013 3014 if (reachable(src)) { 3015 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3016 } else { 3017 lea(rscratch, src); 3018 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3019 } 3020 } 3021 3022 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3023 if (width == Assembler::Q) { 3024 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3025 } else { 3026 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3027 } 3028 } 3029 3030 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3031 int eq_cond_enc = 0x29; 3032 int gt_cond_enc = 0x37; 3033 if (width != Assembler::Q) { 3034 eq_cond_enc = 0x74 + width; 3035 gt_cond_enc = 0x64 + width; 3036 } 3037 switch (cond) { 3038 case eq: 3039 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3040 break; 3041 case neq: 3042 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3043 vallones(xtmp, vector_len); 3044 vpxor(dst, xtmp, dst, vector_len); 3045 break; 3046 case le: 3047 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3048 vallones(xtmp, vector_len); 3049 vpxor(dst, xtmp, dst, vector_len); 3050 break; 3051 case nlt: 3052 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3053 vallones(xtmp, vector_len); 3054 vpxor(dst, xtmp, dst, 
vector_len); 3055 break; 3056 case lt: 3057 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3058 break; 3059 case nle: 3060 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3061 break; 3062 default: 3063 assert(false, "Should not reach here"); 3064 } 3065 } 3066 3067 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3068 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3069 Assembler::vpmovzxbw(dst, src, vector_len); 3070 } 3071 3072 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3073 assert((src->encoding() < 16),"XMM register should be 0-15"); 3074 Assembler::vpmovmskb(dst, src, vector_len); 3075 } 3076 3077 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3078 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3079 Assembler::vpmullw(dst, nds, src, vector_len); 3080 } 3081 3082 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3083 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3084 Assembler::vpmullw(dst, nds, src, vector_len); 3085 } 3086 3087 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3088 assert((UseAVX > 0), "AVX support is needed"); 3089 assert(rscratch != noreg || always_reachable(src), "missing"); 3090 3091 if (reachable(src)) { 3092 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3093 } else { 3094 lea(rscratch, src); 3095 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3096 } 3097 } 3098 3099 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3100 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3101 Assembler::vpsubb(dst, nds, src, vector_len); 3102 } 3103 3104 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3105 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3106 Assembler::vpsubb(dst, nds, src, vector_len); 3107 } 3108 3109 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3110 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3111 Assembler::vpsubw(dst, nds, src, vector_len); 3112 } 3113 3114 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3115 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3116 Assembler::vpsubw(dst, nds, src, vector_len); 3117 } 3118 3119 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3120 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3121 Assembler::vpsraw(dst, nds, shift, vector_len); 3122 } 3123 3124 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3125 assert(((dst->encoding() < 16 && nds->encoding() < 16) || 
VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3126 Assembler::vpsraw(dst, nds, shift, vector_len); 3127 } 3128 3129 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3130 assert(UseAVX > 2,""); 3131 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3132 vector_len = 2; 3133 } 3134 Assembler::evpsraq(dst, nds, shift, vector_len); 3135 } 3136 3137 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3138 assert(UseAVX > 2,""); 3139 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3140 vector_len = 2; 3141 } 3142 Assembler::evpsraq(dst, nds, shift, vector_len); 3143 } 3144 3145 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3146 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3147 Assembler::vpsrlw(dst, nds, shift, vector_len); 3148 } 3149 3150 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3151 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3152 Assembler::vpsrlw(dst, nds, shift, vector_len); 3153 } 3154 3155 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3156 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3157 Assembler::vpsllw(dst, nds, shift, vector_len); 3158 } 3159 3160 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3161 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3162 Assembler::vpsllw(dst, nds, shift, vector_len); 3163 } 3164 3165 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3166 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3167 Assembler::vptest(dst, src); 3168 } 3169 3170 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3171 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3172 Assembler::punpcklbw(dst, src); 3173 } 3174 3175 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3176 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3177 Assembler::pshufd(dst, src, mode); 3178 } 3179 3180 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3181 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3182 Assembler::pshuflw(dst, src, mode); 3183 } 3184 3185 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3186 assert(rscratch != noreg || always_reachable(src), "missing"); 3187 3188 if (reachable(src)) { 3189 vandpd(dst, nds, as_Address(src), vector_len); 3190 } else { 3191 lea(rscratch, src); 3192 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3193 } 3194 } 3195 3196 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3197 assert(rscratch != noreg || always_reachable(src), "missing"); 3198 3199 if (reachable(src)) { 3200 vandps(dst, nds, as_Address(src), vector_len); 
3201 } else { 3202 lea(rscratch, src); 3203 vandps(dst, nds, Address(rscratch, 0), vector_len); 3204 } 3205 } 3206 3207 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3208 bool merge, int vector_len, Register rscratch) { 3209 assert(rscratch != noreg || always_reachable(src), "missing"); 3210 3211 if (reachable(src)) { 3212 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3213 } else { 3214 lea(rscratch, src); 3215 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3216 } 3217 } 3218 3219 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3220 assert(rscratch != noreg || always_reachable(src), "missing"); 3221 3222 if (reachable(src)) { 3223 vdivsd(dst, nds, as_Address(src)); 3224 } else { 3225 lea(rscratch, src); 3226 vdivsd(dst, nds, Address(rscratch, 0)); 3227 } 3228 } 3229 3230 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3231 assert(rscratch != noreg || always_reachable(src), "missing"); 3232 3233 if (reachable(src)) { 3234 vdivss(dst, nds, as_Address(src)); 3235 } else { 3236 lea(rscratch, src); 3237 vdivss(dst, nds, Address(rscratch, 0)); 3238 } 3239 } 3240 3241 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3242 assert(rscratch != noreg || always_reachable(src), "missing"); 3243 3244 if (reachable(src)) { 3245 vmulsd(dst, nds, as_Address(src)); 3246 } else { 3247 lea(rscratch, src); 3248 vmulsd(dst, nds, Address(rscratch, 0)); 3249 } 3250 } 3251 3252 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3253 assert(rscratch != noreg || always_reachable(src), "missing"); 3254 3255 if (reachable(src)) { 3256 vmulss(dst, nds, as_Address(src)); 3257 } else { 3258 lea(rscratch, src); 3259 vmulss(dst, nds, Address(rscratch, 0)); 3260 } 3261 } 3262 3263 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3264 assert(rscratch != noreg || always_reachable(src), "missing"); 3265 3266 if (reachable(src)) { 3267 vsubsd(dst, nds, as_Address(src)); 3268 } else { 3269 lea(rscratch, src); 3270 vsubsd(dst, nds, Address(rscratch, 0)); 3271 } 3272 } 3273 3274 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3275 assert(rscratch != noreg || always_reachable(src), "missing"); 3276 3277 if (reachable(src)) { 3278 vsubss(dst, nds, as_Address(src)); 3279 } else { 3280 lea(rscratch, src); 3281 vsubss(dst, nds, Address(rscratch, 0)); 3282 } 3283 } 3284 3285 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3286 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3287 assert(rscratch != noreg || always_reachable(src), "missing"); 3288 3289 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 3290 } 3291 3292 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3293 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3294 assert(rscratch != noreg || always_reachable(src), "missing"); 3295 3296 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 3297 } 3298 3299 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
int vector_len, Register rscratch) { 3300 assert(rscratch != noreg || always_reachable(src), "missing"); 3301 3302 if (reachable(src)) { 3303 vxorpd(dst, nds, as_Address(src), vector_len); 3304 } else { 3305 lea(rscratch, src); 3306 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 3307 } 3308 } 3309 3310 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3311 assert(rscratch != noreg || always_reachable(src), "missing"); 3312 3313 if (reachable(src)) { 3314 vxorps(dst, nds, as_Address(src), vector_len); 3315 } else { 3316 lea(rscratch, src); 3317 vxorps(dst, nds, Address(rscratch, 0), vector_len); 3318 } 3319 } 3320 3321 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3322 assert(rscratch != noreg || always_reachable(src), "missing"); 3323 3324 if (UseAVX > 1 || (vector_len < 1)) { 3325 if (reachable(src)) { 3326 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 3327 } else { 3328 lea(rscratch, src); 3329 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 3330 } 3331 } else { 3332 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 3333 } 3334 } 3335 3336 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3337 assert(rscratch != noreg || always_reachable(src), "missing"); 3338 3339 if (reachable(src)) { 3340 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 3341 } else { 3342 lea(rscratch, src); 3343 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 3344 } 3345 } 3346 3347 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 3348 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 3349 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 3350 // The inverted mask is sign-extended 3351 andptr(possibly_non_local, inverted_mask); 3352 } 3353 3354 void MacroAssembler::resolve_jobject(Register value, 3355 Register tmp) { 3356 Register thread = r15_thread; 3357 assert_different_registers(value, thread, tmp); 3358 Label done, tagged, weak_tagged; 3359 testptr(value, value); 3360 jcc(Assembler::zero, done); // Use null as-is. 3361 testptr(value, JNIHandles::tag_mask); // Test for tag. 3362 jcc(Assembler::notZero, tagged); 3363 3364 // Resolve local handle 3365 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp); 3366 verify_oop(value); 3367 jmp(done); 3368 3369 bind(tagged); 3370 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 3371 jcc(Assembler::notZero, weak_tagged); 3372 3373 // Resolve global handle 3374 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3375 verify_oop(value); 3376 jmp(done); 3377 3378 bind(weak_tagged); 3379 // Resolve jweak. 3380 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3381 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp); 3382 verify_oop(value); 3383 3384 bind(done); 3385 } 3386 3387 void MacroAssembler::resolve_global_jobject(Register value, 3388 Register tmp) { 3389 Register thread = r15_thread; 3390 assert_different_registers(value, thread, tmp); 3391 Label done; 3392 3393 testptr(value, value); 3394 jcc(Assembler::zero, done); // Use null as-is. 3395 3396 #ifdef ASSERT 3397 { 3398 Label valid_global_tag; 3399 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
3400 jcc(Assembler::notZero, valid_global_tag); 3401 stop("non global jobject using resolve_global_jobject"); 3402 bind(valid_global_tag); 3403 } 3404 #endif 3405 3406 // Resolve global handle 3407 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp); 3408 verify_oop(value); 3409 3410 bind(done); 3411 } 3412 3413 void MacroAssembler::subptr(Register dst, int32_t imm32) { 3414 subq(dst, imm32); 3415 } 3416 3417 // Force generation of a 4 byte immediate value even if it fits into 8bit 3418 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 3419 subq_imm32(dst, imm32); 3420 } 3421 3422 void MacroAssembler::subptr(Register dst, Register src) { 3423 subq(dst, src); 3424 } 3425 3426 // C++ bool manipulation 3427 void MacroAssembler::testbool(Register dst) { 3428 if(sizeof(bool) == 1) 3429 testb(dst, 0xff); 3430 else if(sizeof(bool) == 2) { 3431 // testw implementation needed for two byte bools 3432 ShouldNotReachHere(); 3433 } else if(sizeof(bool) == 4) 3434 testl(dst, dst); 3435 else 3436 // unsupported 3437 ShouldNotReachHere(); 3438 } 3439 3440 void MacroAssembler::testptr(Register dst, Register src) { 3441 testq(dst, src); 3442 } 3443 3444 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3445 void MacroAssembler::tlab_allocate(Register obj, 3446 Register var_size_in_bytes, 3447 int con_size_in_bytes, 3448 Register t1, 3449 Register t2, 3450 Label& slow_case) { 3451 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3452 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 3453 } 3454 3455 RegSet MacroAssembler::call_clobbered_gp_registers() { 3456 RegSet regs; 3457 regs += RegSet::of(rax, rcx, rdx); 3458 #ifndef _WINDOWS 3459 regs += RegSet::of(rsi, rdi); 3460 #endif 3461 regs += RegSet::range(r8, r11); 3462 if (UseAPX) { 3463 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1)); 3464 } 3465 return regs; 3466 } 3467 3468 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 3469 int num_xmm_registers = XMMRegister::available_xmm_registers(); 3470 #if defined(_WINDOWS) 3471 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 3472 if (num_xmm_registers > 16) { 3473 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 3474 } 3475 return result; 3476 #else 3477 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 3478 #endif 3479 } 3480 3481 // C1 only ever uses the first double/float of the XMM register. 3482 static int xmm_save_size() { return sizeof(double); } 3483 3484 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3485 masm->movdbl(Address(rsp, offset), reg); 3486 } 3487 3488 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 3489 masm->movdbl(reg, Address(rsp, offset)); 3490 } 3491 3492 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, 3493 bool save_fpu, int& gp_area_size, int& xmm_area_size) { 3494 3495 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 3496 StackAlignmentInBytes); 3497 xmm_area_size = save_fpu ? 
xmm_registers.size() * xmm_save_size() : 0; 3498 3499 return gp_area_size + xmm_area_size; 3500 } 3501 3502 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 3503 block_comment("push_call_clobbered_registers start"); 3504 // Regular registers 3505 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 3506 3507 int gp_area_size; 3508 int xmm_area_size; 3509 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 3510 gp_area_size, xmm_area_size); 3511 subptr(rsp, total_save_size); 3512 3513 push_set(gp_registers_to_push, 0); 3514 3515 if (save_fpu) { 3516 push_set(call_clobbered_xmm_registers(), gp_area_size); 3517 } 3518 3519 block_comment("push_call_clobbered_registers end"); 3520 } 3521 3522 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 3523 block_comment("pop_call_clobbered_registers start"); 3524 3525 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 3526 3527 int gp_area_size; 3528 int xmm_area_size; 3529 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 3530 gp_area_size, xmm_area_size); 3531 3532 if (restore_fpu) { 3533 pop_set(call_clobbered_xmm_registers(), gp_area_size); 3534 } 3535 3536 pop_set(gp_registers_to_pop, 0); 3537 3538 addptr(rsp, total_save_size); 3539 3540 vzeroupper(); 3541 3542 block_comment("pop_call_clobbered_registers end"); 3543 } 3544 3545 void MacroAssembler::push_set(XMMRegSet set, int offset) { 3546 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 3547 int spill_offset = offset; 3548 3549 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 3550 save_xmm_register(this, spill_offset, *it); 3551 spill_offset += xmm_save_size(); 3552 } 3553 } 3554 3555 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 3556 int restore_size = set.size() * xmm_save_size(); 3557 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 3558 3559 int restore_offset = offset + restore_size - xmm_save_size(); 3560 3561 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 3562 restore_xmm_register(this, restore_offset, *it); 3563 restore_offset -= xmm_save_size(); 3564 } 3565 } 3566 3567 void MacroAssembler::push_set(RegSet set, int offset) { 3568 int spill_offset; 3569 if (offset == -1) { 3570 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3571 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 3572 subptr(rsp, aligned_size); 3573 spill_offset = 0; 3574 } else { 3575 spill_offset = offset; 3576 } 3577 3578 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 3579 movptr(Address(rsp, spill_offset), *it); 3580 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3581 } 3582 } 3583 3584 void MacroAssembler::pop_set(RegSet set, int offset) { 3585 3586 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size; 3587 int restore_size = set.size() * gp_reg_size; 3588 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 3589 3590 int restore_offset; 3591 if (offset == -1) { 3592 restore_offset = restore_size - gp_reg_size; 3593 } else { 3594 restore_offset = offset + restore_size - gp_reg_size; 3595 } 3596 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 3597 movptr(*it, 
Address(rsp, restore_offset)); 3598 restore_offset -= gp_reg_size; 3599 } 3600 3601 if (offset == -1) { 3602 addptr(rsp, aligned_size); 3603 } 3604 } 3605 3606 // Preserves the contents of address, destroys the contents of length_in_bytes and temp. 3607 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 3608 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 3609 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 3610 Label done; 3611 3612 testptr(length_in_bytes, length_in_bytes); 3613 jcc(Assembler::zero, done); 3614 3615 // convert the byte count into a word count and clear the words from the top down 3616 // note: for the remaining code to work, index must be a multiple of BytesPerWord 3617 #ifdef ASSERT 3618 { 3619 Label L; 3620 testptr(length_in_bytes, BytesPerWord - 1); 3621 jcc(Assembler::zero, L); 3622 stop("length must be a multiple of BytesPerWord"); 3623 bind(L); 3624 } 3625 #endif 3626 Register index = length_in_bytes; 3627 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 3628 if (UseIncDec) { 3629 shrptr(index, 3); // divide by 8 (BytesPerWord) to get the number of words to clear 3630 } else { 3631 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 3632 shrptr(index, 1); 3633 } 3634 3635 // initialize remaining object fields: zero the words from the top of the range down 3636 { 3637 Label loop; 3638 bind(loop); 3639 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 3640 decrement(index); 3641 jcc(Assembler::notZero, loop); 3642 } 3643 3644 bind(done); 3645 } 3646 3647 // Look up the method for a megamorphic invokeinterface call. 3648 // The target method is determined by <intf_klass, itable_index>. 3649 // The receiver klass is in recv_klass. 3650 // On success, the result will be in method_result, and execution falls through. 3651 // On failure, execution transfers to the given label. 3652 void MacroAssembler::lookup_interface_method(Register recv_klass, 3653 Register intf_klass, 3654 RegisterOrConstant itable_index, 3655 Register method_result, 3656 Register scan_temp, 3657 Label& L_no_such_interface, 3658 bool return_method) { 3659 assert_different_registers(recv_klass, intf_klass, scan_temp); 3660 assert_different_registers(method_result, intf_klass, scan_temp); 3661 assert(recv_klass != method_result || !return_method, 3662 "recv_klass can be destroyed when method isn't needed"); 3663 3664 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 3665 "caller must use same register for non-constant itable index as for method"); 3666 3667 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 3668 int vtable_base = in_bytes(Klass::vtable_start_offset()); 3669 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 3670 int scan_step = itableOffsetEntry::size() * wordSize; 3671 int vte_size = vtableEntry::size_in_bytes(); 3672 Address::ScaleFactor times_vte_scale = Address::times_ptr; 3673 assert(vte_size == wordSize, "else adjust times_vte_scale"); 3674 3675 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 3676 3677 // Could store the aligned, prescaled offset in the klass. 3678 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 3679 3680 if (return_method) { 3681 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
3682 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3683 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 3684 } 3685 3686 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 3687 // if (scan->interface() == intf) { 3688 // result = (klass + scan->offset() + itable_index); 3689 // } 3690 // } 3691 Label search, found_method; 3692 3693 for (int peel = 1; peel >= 0; peel--) { 3694 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 3695 cmpptr(intf_klass, method_result); 3696 3697 if (peel) { 3698 jccb(Assembler::equal, found_method); 3699 } else { 3700 jccb(Assembler::notEqual, search); 3701 // (invert the test to fall through to found_method...) 3702 } 3703 3704 if (!peel) break; 3705 3706 bind(search); 3707 3708 // Check that the previous entry is non-null. A null entry means that 3709 // the receiver class doesn't implement the interface, and wasn't the 3710 // same as when the caller was compiled. 3711 testptr(method_result, method_result); 3712 jcc(Assembler::zero, L_no_such_interface); 3713 addptr(scan_temp, scan_step); 3714 } 3715 3716 bind(found_method); 3717 3718 if (return_method) { 3719 // Got a hit. 3720 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 3721 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 3722 } 3723 } 3724 3725 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 3726 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 3727 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 3728 // The target method is determined by <holder_klass, itable_index>. 3729 // The receiver klass is in recv_klass. 3730 // On success, the result will be in method_result, and execution falls through. 3731 // On failure, execution transfers to the given label. 3732 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 3733 Register holder_klass, 3734 Register resolved_klass, 3735 Register method_result, 3736 Register scan_temp, 3737 Register temp_reg2, 3738 Register receiver, 3739 int itable_index, 3740 Label& L_no_such_interface) { 3741 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 3742 Register temp_itbl_klass = method_result; 3743 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 3744 3745 int vtable_base = in_bytes(Klass::vtable_start_offset()); 3746 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 3747 int scan_step = itableOffsetEntry::size() * wordSize; 3748 int vte_size = vtableEntry::size_in_bytes(); 3749 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 3750 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 3751 Address::ScaleFactor times_vte_scale = Address::times_ptr; 3752 assert(vte_size == wordSize, "adjust times_vte_scale"); 3753 3754 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 3755 3756 // temp_itbl_klass = recv_klass.itable[0] 3757 // scan_temp = &recv_klass.itable[0] + step 3758 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 3759 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 3760 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 3761 xorptr(temp_reg, temp_reg); 3762 3763 // Initial checks: 3764 // - if (holder_klass != resolved_klass), go to "scan for resolved" 3765 // - if (itable[0] == 0), no such interface 3766 // - if (itable[0] == holder_klass), shortcut to "holder found" 3767 cmpptr(holder_klass, resolved_klass); 3768 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 3769 testptr(temp_itbl_klass, temp_itbl_klass); 3770 jccb(Assembler::zero, L_no_such_interface); 3771 cmpptr(holder_klass, temp_itbl_klass); 3772 jccb(Assembler::equal, L_holder_found); 3773 3774 // Loop: Look for holder_klass record in itable 3775 // do { 3776 // tmp = itable[index]; 3777 // index += step; 3778 // if (tmp == holder_klass) { 3779 // goto L_holder_found; // Found! 3780 // } 3781 // } while (tmp != 0); 3782 // goto L_no_such_interface // Not found. 3783 Label L_scan_holder; 3784 bind(L_scan_holder); 3785 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3786 addptr(scan_temp, scan_step); 3787 cmpptr(holder_klass, temp_itbl_klass); 3788 jccb(Assembler::equal, L_holder_found); 3789 testptr(temp_itbl_klass, temp_itbl_klass); 3790 jccb(Assembler::notZero, L_scan_holder); 3791 3792 jmpb(L_no_such_interface); 3793 3794 // Loop: Look for resolved_class record in itable 3795 // do { 3796 // tmp = itable[index]; 3797 // index += step; 3798 // if (tmp == holder_klass) { 3799 // // Also check if we have met a holder klass 3800 // holder_tmp = itable[index-step-ioffset]; 3801 // } 3802 // if (tmp == resolved_klass) { 3803 // goto L_resolved_found; // Found! 3804 // } 3805 // } while (tmp != 0); 3806 // goto L_no_such_interface // Not found. 3807 // 3808 Label L_loop_scan_resolved; 3809 bind(L_loop_scan_resolved); 3810 movptr(temp_itbl_klass, Address(scan_temp, 0)); 3811 addptr(scan_temp, scan_step); 3812 bind(L_loop_scan_resolved_entry); 3813 cmpptr(holder_klass, temp_itbl_klass); 3814 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3815 cmpptr(resolved_klass, temp_itbl_klass); 3816 jccb(Assembler::equal, L_resolved_found); 3817 testptr(temp_itbl_klass, temp_itbl_klass); 3818 jccb(Assembler::notZero, L_loop_scan_resolved); 3819 3820 jmpb(L_no_such_interface); 3821 3822 Label L_ready; 3823 3824 // See if we already have a holder klass. If not, go and scan for it. 
3825 bind(L_resolved_found); 3826 testptr(temp_reg, temp_reg); 3827 jccb(Assembler::zero, L_scan_holder); 3828 jmpb(L_ready); 3829 3830 bind(L_holder_found); 3831 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 3832 3833 // Finally, temp_reg contains holder_klass vtable offset 3834 bind(L_ready); 3835 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 3836 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 3837 load_klass(scan_temp, receiver, noreg); 3838 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3839 } else { 3840 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 3841 } 3842 } 3843 3844 3845 // virtual method calling 3846 void MacroAssembler::lookup_virtual_method(Register recv_klass, 3847 RegisterOrConstant vtable_index, 3848 Register method_result) { 3849 const ByteSize base = Klass::vtable_start_offset(); 3850 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 3851 Address vtable_entry_addr(recv_klass, 3852 vtable_index, Address::times_ptr, 3853 base + vtableEntry::method_offset()); 3854 movptr(method_result, vtable_entry_addr); 3855 } 3856 3857 3858 void MacroAssembler::check_klass_subtype(Register sub_klass, 3859 Register super_klass, 3860 Register temp_reg, 3861 Label& L_success) { 3862 Label L_failure; 3863 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 3864 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 3865 bind(L_failure); 3866 } 3867 3868 3869 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 3870 Register super_klass, 3871 Register temp_reg, 3872 Label* L_success, 3873 Label* L_failure, 3874 Label* L_slow_path, 3875 RegisterOrConstant super_check_offset) { 3876 assert_different_registers(sub_klass, super_klass, temp_reg); 3877 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 3878 if (super_check_offset.is_register()) { 3879 assert_different_registers(sub_klass, super_klass, 3880 super_check_offset.as_register()); 3881 } else if (must_load_sco) { 3882 assert(temp_reg != noreg, "supply either a temp or a register offset"); 3883 } 3884 3885 Label L_fallthrough; 3886 int label_nulls = 0; 3887 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3888 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3889 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 3890 assert(label_nulls <= 1, "at most one null in the batch"); 3891 3892 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3893 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 3894 Address super_check_offset_addr(super_klass, sco_offset); 3895 3896 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 3897 // range of a jccb. If this routine grows larger, reconsider at 3898 // least some of these. 3899 #define local_jcc(assembler_cond, label) \ 3900 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 3901 else jcc( assembler_cond, label) /*omit semi*/ 3902 3903 // Hacked jmp, which may only be used just before L_fallthrough. 
3904 #define final_jmp(label) \ 3905 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3906 else jmp(label) /*omit semi*/ 3907 3908 // If the pointers are equal, we are done (e.g., String[] elements). 3909 // This self-check enables sharing of secondary supertype arrays among 3910 // non-primary types such as array-of-interface. Otherwise, each such 3911 // type would need its own customized SSA. 3912 // We move this check to the front of the fast path because many 3913 // type checks are in fact trivially successful in this manner, 3914 // so we get a nicely predicted branch right at the start of the check. 3915 cmpptr(sub_klass, super_klass); 3916 local_jcc(Assembler::equal, *L_success); 3917 3918 // Check the supertype display: 3919 if (must_load_sco) { 3920 // Positive movl does right thing on LP64. 3921 movl(temp_reg, super_check_offset_addr); 3922 super_check_offset = RegisterOrConstant(temp_reg); 3923 } 3924 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 3925 cmpptr(super_klass, super_check_addr); // load displayed supertype 3926 3927 // This check has worked decisively for primary supers. 3928 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3929 // (Secondary supers are interfaces and very deeply nested subtypes.) 3930 // This works in the same check above because of a tricky aliasing 3931 // between the super_cache and the primary super display elements. 3932 // (The 'super_check_addr' can address either, as the case requires.) 3933 // Note that the cache is updated below if it does not help us find 3934 // what we need immediately. 3935 // So if it was a primary super, we can just fail immediately. 3936 // Otherwise, it's the slow path for us (no success at this point). 3937 3938 if (super_check_offset.is_register()) { 3939 local_jcc(Assembler::equal, *L_success); 3940 cmpl(super_check_offset.as_register(), sc_offset); 3941 if (L_failure == &L_fallthrough) { 3942 local_jcc(Assembler::equal, *L_slow_path); 3943 } else { 3944 local_jcc(Assembler::notEqual, *L_failure); 3945 final_jmp(*L_slow_path); 3946 } 3947 } else if (super_check_offset.as_constant() == sc_offset) { 3948 // Need a slow path; fast failure is impossible. 3949 if (L_slow_path == &L_fallthrough) { 3950 local_jcc(Assembler::equal, *L_success); 3951 } else { 3952 local_jcc(Assembler::notEqual, *L_slow_path); 3953 final_jmp(*L_success); 3954 } 3955 } else { 3956 // No slow path; it's a fast decision. 
3957 if (L_failure == &L_fallthrough) { 3958 local_jcc(Assembler::equal, *L_success); 3959 } else { 3960 local_jcc(Assembler::notEqual, *L_failure); 3961 final_jmp(*L_success); 3962 } 3963 } 3964 3965 bind(L_fallthrough); 3966 3967 #undef local_jcc 3968 #undef final_jmp 3969 } 3970 3971 3972 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass, 3973 Register super_klass, 3974 Register temp_reg, 3975 Register temp2_reg, 3976 Label* L_success, 3977 Label* L_failure, 3978 bool set_cond_codes) { 3979 assert_different_registers(sub_klass, super_klass, temp_reg); 3980 if (temp2_reg != noreg) 3981 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 3982 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 3983 3984 Label L_fallthrough; 3985 int label_nulls = 0; 3986 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 3987 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 3988 assert(label_nulls <= 1, "at most one null in the batch"); 3989 3990 // a couple of useful fields in sub_klass: 3991 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 3992 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3993 Address secondary_supers_addr(sub_klass, ss_offset); 3994 Address super_cache_addr( sub_klass, sc_offset); 3995 3996 // Do a linear scan of the secondary super-klass chain. 3997 // This code is rarely used, so simplicity is a virtue here. 3998 // The repne_scan instruction uses fixed registers, which we must spill. 3999 // Don't worry too much about pre-existing connections with the input regs. 4000 4001 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4002 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4003 4004 // Get super_klass value into rax (even if it was in rdi or rcx). 4005 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4006 if (super_klass != rax) { 4007 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4008 mov(rax, super_klass); 4009 } 4010 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4011 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4012 4013 #ifndef PRODUCT 4014 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4015 ExternalAddress pst_counter_addr((address) pst_counter); 4016 lea(rcx, pst_counter_addr); 4017 incrementl(Address(rcx, 0)); 4018 #endif //PRODUCT 4019 4020 // We will consult the secondary-super array. 4021 movptr(rdi, secondary_supers_addr); 4022 // Load the array length. (Positive movl does right thing on LP64.) 4023 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4024 // Skip to start of data. 4025 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 4026 4027 // Scan RCX words at [RDI] for an occurrence of RAX. 4028 // Set NZ/Z based on last compare. 4029 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 4030 // not change flags (only scas instruction which is repeated sets flags). 4031 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 4032 4033 testptr(rax,rax); // Set Z = 0 4034 repne_scan(); 4035 4036 // Unspill the temp. registers: 4037 if (pushed_rdi) pop(rdi); 4038 if (pushed_rcx) pop(rcx); 4039 if (pushed_rax) pop(rax); 4040 4041 if (set_cond_codes) { 4042 // Special hack for the AD files: rdi is guaranteed non-zero. 4043 assert(!pushed_rdi, "rdi must be left non-null"); 4044 // Also, the condition codes are properly set Z/NZ on succeed/failure. 
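    // (Nothing needs to be emitted for this case: the pops above do not touch
    // the flags, so ZF still reflects the outcome of the repne_scan.)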
4045 } 4046 4047 if (L_failure == &L_fallthrough) 4048 jccb(Assembler::notEqual, *L_failure); 4049 else jcc(Assembler::notEqual, *L_failure); 4050 4051 // Success. Cache the super we found and proceed in triumph. 4052 movptr(super_cache_addr, super_klass); 4053 4054 if (L_success != &L_fallthrough) { 4055 jmp(*L_success); 4056 } 4057 4058 #undef IS_A_TEMP 4059 4060 bind(L_fallthrough); 4061 } 4062 4063 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4064 Register super_klass, 4065 Register temp_reg, 4066 Register temp2_reg, 4067 Label* L_success, 4068 Label* L_failure, 4069 bool set_cond_codes) { 4070 assert(set_cond_codes == false, "must be false on 64-bit x86"); 4071 check_klass_subtype_slow_path 4072 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg, 4073 L_success, L_failure); 4074 } 4075 4076 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4077 Register super_klass, 4078 Register temp_reg, 4079 Register temp2_reg, 4080 Register temp3_reg, 4081 Register temp4_reg, 4082 Label* L_success, 4083 Label* L_failure) { 4084 if (UseSecondarySupersTable) { 4085 check_klass_subtype_slow_path_table 4086 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg, 4087 L_success, L_failure); 4088 } else { 4089 check_klass_subtype_slow_path_linear 4090 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false); 4091 } 4092 } 4093 4094 Register MacroAssembler::allocate_if_noreg(Register r, 4095 RegSetIterator<Register> &available_regs, 4096 RegSet ®s_to_push) { 4097 if (!r->is_valid()) { 4098 r = *available_regs++; 4099 regs_to_push += r; 4100 } 4101 return r; 4102 } 4103 4104 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 4105 Register super_klass, 4106 Register temp_reg, 4107 Register temp2_reg, 4108 Register temp3_reg, 4109 Register result_reg, 4110 Label* L_success, 4111 Label* L_failure) { 4112 // NB! Callers may assume that, when temp2_reg is a valid register, 4113 // this code sets it to a nonzero value. 
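  // We record that fact up front because allocate_if_noreg() below may hand out
  // a different temporary in its place; the movq(temp2_reg, 1) after the table
  // lookup keeps the promise (and, being a mov, leaves the flags from the cmpq
  // intact).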
4114 bool temp2_reg_was_valid = temp2_reg->is_valid(); 4115 4116 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 4117 4118 Label L_fallthrough; 4119 int label_nulls = 0; 4120 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4121 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4122 assert(label_nulls <= 1, "at most one null in the batch"); 4123 4124 BLOCK_COMMENT("check_klass_subtype_slow_path_table"); 4125 4126 RegSetIterator<Register> available_regs 4127 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin(); 4128 4129 RegSet pushed_regs; 4130 4131 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 4132 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 4133 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 4134 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 4135 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs); 4136 4137 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg); 4138 4139 { 4140 4141 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4142 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4143 subptr(rsp, aligned_size); 4144 push_set(pushed_regs, 0); 4145 4146 lookup_secondary_supers_table_var(sub_klass, 4147 super_klass, 4148 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg); 4149 cmpq(result_reg, 0); 4150 4151 // Unspill the temp. registers: 4152 pop_set(pushed_regs, 0); 4153 // Increment SP but do not clobber flags. 4154 lea(rsp, Address(rsp, aligned_size)); 4155 } 4156 4157 if (temp2_reg_was_valid) { 4158 movq(temp2_reg, 1); 4159 } 4160 4161 jcc(Assembler::notEqual, *L_failure); 4162 4163 if (L_success != &L_fallthrough) { 4164 jmp(*L_success); 4165 } 4166 4167 bind(L_fallthrough); 4168 } 4169 4170 // population_count variant for running without the POPCNT 4171 // instruction, which was introduced with SSE4.2 in 2008. 4172 void MacroAssembler::population_count(Register dst, Register src, 4173 Register scratch1, Register scratch2) { 4174 assert_different_registers(src, scratch1, scratch2); 4175 if (UsePopCountInstruction) { 4176 Assembler::popcntq(dst, src); 4177 } else { 4178 assert_different_registers(src, scratch1, scratch2); 4179 assert_different_registers(dst, scratch1, scratch2); 4180 Label loop, done; 4181 4182 mov(scratch1, src); 4183 // dst = 0; 4184 // while(scratch1 != 0) { 4185 // dst++; 4186 // scratch1 &= (scratch1 - 1); 4187 // } 4188 xorl(dst, dst); 4189 testq(scratch1, scratch1); 4190 jccb(Assembler::equal, done); 4191 { 4192 bind(loop); 4193 incq(dst); 4194 movq(scratch2, scratch1); 4195 decq(scratch2); 4196 andq(scratch1, scratch2); 4197 jccb(Assembler::notEqual, loop); 4198 } 4199 bind(done); 4200 } 4201 #ifdef ASSERT 4202 mov64(scratch1, 0xCafeBabeDeadBeef); 4203 movq(scratch2, scratch1); 4204 #endif 4205 } 4206 4207 // Ensure that the inline code and the stub are using the same registers. 
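// (The slow-path stub reached through StubRoutines::lookup_secondary_supers_table_slow_path_stub()
// is generated separately, so both sides assert the shared register assignment below.)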
4208 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 4209 do { \ 4210 assert(r_super_klass == rax, "mismatch"); \ 4211 assert(r_array_base == rbx, "mismatch"); \ 4212 assert(r_array_length == rcx, "mismatch"); \ 4213 assert(r_array_index == rdx, "mismatch"); \ 4214 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \ 4215 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \ 4216 assert(result == rdi || result == noreg, "mismatch"); \ 4217 } while(0) 4218 4219 // Versions of salq and rorq that don't need count to be in rcx 4220 4221 void MacroAssembler::salq(Register dest, Register count) { 4222 if (count == rcx) { 4223 Assembler::salq(dest); 4224 } else { 4225 assert_different_registers(rcx, dest); 4226 xchgq(rcx, count); 4227 Assembler::salq(dest); 4228 xchgq(rcx, count); 4229 } 4230 } 4231 4232 void MacroAssembler::rorq(Register dest, Register count) { 4233 if (count == rcx) { 4234 Assembler::rorq(dest); 4235 } else { 4236 assert_different_registers(rcx, dest); 4237 xchgq(rcx, count); 4238 Assembler::rorq(dest); 4239 xchgq(rcx, count); 4240 } 4241 } 4242 4243 // Return true: we succeeded in generating this code 4244 // 4245 // At runtime, return 0 in result if r_super_klass is a superclass of 4246 // r_sub_klass, otherwise return nonzero. Use this if you know the 4247 // super_klass_slot of the class you're looking for. This is always 4248 // the case for instanceof and checkcast. 4249 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 4250 Register r_super_klass, 4251 Register temp1, 4252 Register temp2, 4253 Register temp3, 4254 Register temp4, 4255 Register result, 4256 u1 super_klass_slot) { 4257 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 4258 4259 Label L_fallthrough, L_success, L_failure; 4260 4261 BLOCK_COMMENT("lookup_secondary_supers_table {"); 4262 4263 const Register 4264 r_array_index = temp1, 4265 r_array_length = temp2, 4266 r_array_base = temp3, 4267 r_bitmap = temp4; 4268 4269 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 4270 4271 xorq(result, result); // = 0 4272 4273 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 4274 movq(r_array_index, r_bitmap); 4275 4276 // First check the bitmap to see if super_klass might be present. If 4277 // the bit is zero, we are certain that super_klass is not one of 4278 // the secondary supers. 4279 u1 bit = super_klass_slot; 4280 { 4281 // NB: If the count in a x86 shift instruction is 0, the flags are 4282 // not affected, so we do a testq instead. 4283 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit; 4284 if (shift_count != 0) { 4285 salq(r_array_index, shift_count); 4286 } else { 4287 testq(r_array_index, r_array_index); 4288 } 4289 } 4290 // We test the MSB of r_array_index, i.e. its sign bit 4291 jcc(Assembler::positive, L_failure); 4292 4293 // Get the first array index that can contain super_klass into r_array_index. 4294 if (bit != 0) { 4295 population_count(r_array_index, r_array_index, temp2, temp3); 4296 } else { 4297 movl(r_array_index, 1); 4298 } 4299 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 4300 4301 // We will consult the secondary-super array. 4302 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4303 4304 // We're asserting that the first word in an Array<Klass*> is the 4305 // length, and the second word is the first word of the data. 
If 4306 // that ever changes, r_array_base will have to be adjusted here. 4307 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4308 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4309 4310 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4311 jccb(Assembler::equal, L_success); 4312 4313 // Is there another entry to check? Consult the bitmap. 4314 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 4315 jccb(Assembler::carryClear, L_failure); 4316 4317 // Linear probe. Rotate the bitmap so that the next bit to test is 4318 // in Bit 1. 4319 if (bit != 0) { 4320 rorq(r_bitmap, bit); 4321 } 4322 4323 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4324 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4325 // Kills: r_array_length. 4326 // Returns: result. 4327 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 4328 // Result (0/1) is in rdi 4329 jmpb(L_fallthrough); 4330 4331 bind(L_failure); 4332 incq(result); // 0 => 1 4333 4334 bind(L_success); 4335 // result = 0; 4336 4337 bind(L_fallthrough); 4338 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4339 4340 if (VerifySecondarySupers) { 4341 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4342 temp1, temp2, temp3); 4343 } 4344 } 4345 4346 // At runtime, return 0 in result if r_super_klass is a superclass of 4347 // r_sub_klass, otherwise return nonzero. Use this version of 4348 // lookup_secondary_supers_table() if you don't know ahead of time 4349 // which superclass will be searched for. Used by interpreter and 4350 // runtime stubs. It is larger and has somewhat greater latency than 4351 // the version above, which takes a constant super_klass_slot. 4352 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 4353 Register r_super_klass, 4354 Register temp1, 4355 Register temp2, 4356 Register temp3, 4357 Register temp4, 4358 Register result) { 4359 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 4360 assert_different_registers(r_sub_klass, r_super_klass, rcx); 4361 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4); 4362 4363 Label L_fallthrough, L_success, L_failure; 4364 4365 BLOCK_COMMENT("lookup_secondary_supers_table {"); 4366 4367 RegSetIterator<Register> available_regs = (temps - rcx).begin(); 4368 4369 // FIXME. Once we are sure that all paths reaching this point really 4370 // do pass rcx as one of our temps we can get rid of the following 4371 // workaround. 4372 assert(temps.contains(rcx), "fix this code"); 4373 4374 // We prefer to have our shift count in rcx. If rcx is one of our 4375 // temps, use it for slot. If not, pick any of our temps. 4376 Register slot; 4377 if (!temps.contains(rcx)) { 4378 slot = *available_regs++; 4379 } else { 4380 slot = rcx; 4381 } 4382 4383 const Register r_array_index = *available_regs++; 4384 const Register r_bitmap = *available_regs++; 4385 4386 // The logic above guarantees this property, but we state it here. 4387 assert_different_registers(r_array_index, r_bitmap, rcx); 4388 4389 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 4390 movq(r_array_index, r_bitmap); 4391 4392 // First check the bitmap to see if super_klass might be present. If 4393 // the bit is zero, we are certain that super_klass is not one of 4394 // the secondary supers. 
4395 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4396 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64) 4397 salq(r_array_index, slot); 4398 4399 testq(r_array_index, r_array_index); 4400 // We test the MSB of r_array_index, i.e. its sign bit 4401 jcc(Assembler::positive, L_failure); 4402 4403 const Register r_array_base = *available_regs++; 4404 4405 // Get the first array index that can contain super_klass into r_array_index. 4406 // Note: Clobbers r_array_base and slot. 4407 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot); 4408 4409 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 4410 4411 // We will consult the secondary-super array. 4412 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4413 4414 // We're asserting that the first word in an Array<Klass*> is the 4415 // length, and the second word is the first word of the data. If 4416 // that ever changes, r_array_base will have to be adjusted here. 4417 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 4418 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 4419 4420 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4421 jccb(Assembler::equal, L_success); 4422 4423 // Restore slot to its true value 4424 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 4425 4426 // Linear probe. Rotate the bitmap so that the next bit to test is 4427 // in Bit 1. 4428 rorq(r_bitmap, slot); 4429 4430 // Is there another entry to check? Consult the bitmap. 4431 btq(r_bitmap, 1); 4432 jccb(Assembler::carryClear, L_failure); 4433 4434 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 4435 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 4436 // Kills: r_array_length. 4437 // Returns: result. 4438 lookup_secondary_supers_table_slow_path(r_super_klass, 4439 r_array_base, 4440 r_array_index, 4441 r_bitmap, 4442 /*temp1*/result, 4443 /*temp2*/slot, 4444 &L_success, 4445 nullptr); 4446 4447 bind(L_failure); 4448 movq(result, 1); 4449 jmpb(L_fallthrough); 4450 4451 bind(L_success); 4452 xorq(result, result); // = 0 4453 4454 bind(L_fallthrough); 4455 BLOCK_COMMENT("} lookup_secondary_supers_table"); 4456 4457 if (VerifySecondarySupers) { 4458 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 4459 temp1, temp2, temp3); 4460 } 4461 } 4462 4463 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit, 4464 Label* L_success, Label* L_failure) { 4465 Label L_loop, L_fallthrough; 4466 { 4467 int label_nulls = 0; 4468 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4469 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4470 assert(label_nulls <= 1, "at most one null in the batch"); 4471 } 4472 bind(L_loop); 4473 cmpq(value, Address(addr, count, Address::times_8)); 4474 jcc(Assembler::equal, *L_success); 4475 addl(count, 1); 4476 cmpl(count, limit); 4477 jcc(Assembler::less, L_loop); 4478 4479 if (&L_fallthrough != L_failure) { 4480 jmp(*L_failure); 4481 } 4482 bind(L_fallthrough); 4483 } 4484 4485 // Called by code generated by check_klass_subtype_slow_path 4486 // above. This is called when there is a collision in the hashed 4487 // lookup in the secondary supers array. 
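// Informal sketch of the probing loop below (the caller has already examined
// bits 0 and 1 of the bitmap):
//
//   while (true) {
//     if (index >= length) index = 0;                    // wrap around
//     if (secondary_supers[index] == super) goto success;
//     if (bitmap bit 2 is clear) goto failure;           // no further candidates
//     bitmap = rotate_right(bitmap, 1); index += 1;
//   }
//
// Tables with more than SECONDARY_SUPERS_TABLE_SIZE - 2 entries fall back to a
// plain linear scan (L_huge).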
4488 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 4489 Register r_array_base, 4490 Register r_array_index, 4491 Register r_bitmap, 4492 Register temp1, 4493 Register temp2, 4494 Label* L_success, 4495 Label* L_failure) { 4496 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 4497 4498 const Register 4499 r_array_length = temp1, 4500 r_sub_klass = noreg, 4501 result = noreg; 4502 4503 Label L_fallthrough; 4504 int label_nulls = 0; 4505 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4506 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4507 assert(label_nulls <= 1, "at most one null in the batch"); 4508 4509 // Load the array length. 4510 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4511 // And adjust the array base to point to the data. 4512 // NB! Effectively increments current slot index by 1. 4513 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 4514 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4515 4516 // Linear probe 4517 Label L_huge; 4518 4519 // The bitmap is full to bursting. 4520 // Implicit invariant: BITMAP_FULL implies (length > 0) 4521 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 4522 jcc(Assembler::greater, L_huge); 4523 4524 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 4525 // current slot (at secondary_supers[r_array_index]) has not yet 4526 // been inspected, and r_array_index may be out of bounds if we 4527 // wrapped around the end of the array. 4528 4529 { // This is conventional linear probing, but instead of terminating 4530 // when a null entry is found in the table, we maintain a bitmap 4531 // in which a 0 indicates missing entries. 4532 // The check above guarantees there are 0s in the bitmap, so the loop 4533 // eventually terminates. 4534 4535 xorl(temp2, temp2); // = 0; 4536 4537 Label L_again; 4538 bind(L_again); 4539 4540 // Check for array wraparound. 4541 cmpl(r_array_index, r_array_length); 4542 cmovl(Assembler::greaterEqual, r_array_index, temp2); 4543 4544 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 4545 jcc(Assembler::equal, *L_success); 4546 4547 // If the next bit in bitmap is zero, we're done. 4548 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 4549 jcc(Assembler::carryClear, *L_failure); 4550 4551 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 4552 addl(r_array_index, 1); 4553 4554 jmp(L_again); 4555 } 4556 4557 { // Degenerate case: more than 64 secondary supers. 4558 // FIXME: We could do something smarter here, maybe a vectorized 4559 // comparison or a binary search, but is that worth any added 4560 // complexity? 4561 bind(L_huge); 4562 xorl(r_array_index, r_array_index); // = 0 4563 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 4564 L_success, 4565 (&L_fallthrough != L_failure ? L_failure : nullptr)); 4566 4567 bind(L_fallthrough); 4568 } 4569 } 4570 4571 struct VerifyHelperArguments { 4572 Klass* _super; 4573 Klass* _sub; 4574 intptr_t _linear_result; 4575 intptr_t _table_result; 4576 }; 4577 4578 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 4579 Klass::on_secondary_supers_verification_failure(args->_super, 4580 args->_sub, 4581 args->_linear_result, 4582 args->_table_result, 4583 msg); 4584 } 4585 4586 // Make sure that the hashed lookup and a linear scan agree. 
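// The linear scan computes 0 (present) or 1 (absent); if that disagrees with the
// result of the hashed lookup, the interesting values are pushed to form a
// VerifyHelperArguments record and verify_secondary_supers_table_helper() reports
// the failure (the generated code never expects that call to return).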
4587 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 4588 Register r_super_klass, 4589 Register result, 4590 Register temp1, 4591 Register temp2, 4592 Register temp3) { 4593 const Register 4594 r_array_index = temp1, 4595 r_array_length = temp2, 4596 r_array_base = temp3, 4597 r_bitmap = noreg; 4598 4599 BLOCK_COMMENT("verify_secondary_supers_table {"); 4600 4601 Label L_success, L_failure, L_check, L_done; 4602 4603 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 4604 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 4605 // And adjust the array base to point to the data. 4606 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 4607 4608 testl(r_array_length, r_array_length); // array_length == 0? 4609 jcc(Assembler::zero, L_failure); 4610 4611 movl(r_array_index, 0); 4612 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 4613 // fall through to L_failure 4614 4615 const Register linear_result = r_array_index; // reuse temp1 4616 4617 bind(L_failure); // not present 4618 movl(linear_result, 1); 4619 jmp(L_check); 4620 4621 bind(L_success); // present 4622 movl(linear_result, 0); 4623 4624 bind(L_check); 4625 cmpl(linear_result, result); 4626 jcc(Assembler::equal, L_done); 4627 4628 { // To avoid calling convention issues, build a record on the stack 4629 // and pass the pointer to that instead. 4630 push(result); 4631 push(linear_result); 4632 push(r_sub_klass); 4633 push(r_super_klass); 4634 movptr(c_rarg1, rsp); 4635 movptr(c_rarg0, (uintptr_t) "mismatch"); 4636 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 4637 should_not_reach_here(); 4638 } 4639 bind(L_done); 4640 4641 BLOCK_COMMENT("} verify_secondary_supers_table"); 4642 } 4643 4644 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 4645 4646 void MacroAssembler::clinit_barrier(Register klass, Label* L_fast_path, Label* L_slow_path) { 4647 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 4648 4649 Label L_fallthrough; 4650 if (L_fast_path == nullptr) { 4651 L_fast_path = &L_fallthrough; 4652 } else if (L_slow_path == nullptr) { 4653 L_slow_path = &L_fallthrough; 4654 } 4655 4656 // Fast path check: class is fully initialized. 4657 // init_state needs acquire, but x86 is TSO, and so we are already good. 
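  // (So the plain byte compare against fully_initialized below is sufficient;
  // no fence or locked access is emitted.)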
4658 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 4659 jcc(Assembler::equal, *L_fast_path); 4660 4661 // Fast path check: current thread is initializer thread 4662 cmpptr(r15_thread, Address(klass, InstanceKlass::init_thread_offset())); 4663 if (L_slow_path == &L_fallthrough) { 4664 jcc(Assembler::equal, *L_fast_path); 4665 bind(*L_slow_path); 4666 } else if (L_fast_path == &L_fallthrough) { 4667 jcc(Assembler::notEqual, *L_slow_path); 4668 bind(*L_fast_path); 4669 } else { 4670 Unimplemented(); 4671 } 4672 } 4673 4674 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 4675 if (VM_Version::supports_cmov()) { 4676 cmovl(cc, dst, src); 4677 } else { 4678 Label L; 4679 jccb(negate_condition(cc), L); 4680 movl(dst, src); 4681 bind(L); 4682 } 4683 } 4684 4685 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 4686 if (VM_Version::supports_cmov()) { 4687 cmovl(cc, dst, src); 4688 } else { 4689 Label L; 4690 jccb(negate_condition(cc), L); 4691 movl(dst, src); 4692 bind(L); 4693 } 4694 } 4695 4696 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 4697 if (!VerifyOops) return; 4698 4699 BLOCK_COMMENT("verify_oop {"); 4700 push(rscratch1); 4701 push(rax); // save rax 4702 push(reg); // pass register argument 4703 4704 // Pass register number to verify_oop_subroutine 4705 const char* b = nullptr; 4706 { 4707 ResourceMark rm; 4708 stringStream ss; 4709 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 4710 b = code_string(ss.as_string()); 4711 } 4712 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4713 pushptr(buffer.addr(), rscratch1); 4714 4715 // call indirectly to solve generation ordering problem 4716 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4717 call(rax); 4718 // Caller pops the arguments (oop, message) and restores rax, r10 4719 BLOCK_COMMENT("} verify_oop"); 4720 } 4721 4722 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 4723 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 4724 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 4725 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 4726 vpternlogd(dst, 0xFF, dst, dst, vector_len); 4727 } else if (VM_Version::supports_avx()) { 4728 vpcmpeqd(dst, dst, dst, vector_len); 4729 } else { 4730 pcmpeqd(dst, dst); 4731 } 4732 } 4733 4734 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 4735 int extra_slot_offset) { 4736 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
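  // The returned Address designates interpreter expression-stack slot 'arg_slot'
  // (plus extra_slot_offset) relative to rsp; the extra wordSize added below
  // accounts for the return PC sitting between rsp and the arguments.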
4737 int stackElementSize = Interpreter::stackElementSize; 4738 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 4739 #ifdef ASSERT 4740 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 4741 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 4742 #endif 4743 Register scale_reg = noreg; 4744 Address::ScaleFactor scale_factor = Address::no_scale; 4745 if (arg_slot.is_constant()) { 4746 offset += arg_slot.as_constant() * stackElementSize; 4747 } else { 4748 scale_reg = arg_slot.as_register(); 4749 scale_factor = Address::times(stackElementSize); 4750 } 4751 offset += wordSize; // return PC is on stack 4752 return Address(rsp, scale_reg, scale_factor, offset); 4753 } 4754 4755 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 4756 if (!VerifyOops) return; 4757 4758 push(rscratch1); 4759 push(rax); // save rax 4760 // addr may contain rsp, so we will have to adjust it based on the pushes 4761 // we just did (two pushes, hence the 2 * BytesPerWord below) 4762 // NOTE: the 64-bit code seems to have once had a bug here: it did movq(addr, rax), 4763 // which stores rax into addr, the reverse of what was intended. 4764 if (addr.uses(rsp)) { 4765 lea(rax, addr); 4766 pushptr(Address(rax, 2 * BytesPerWord)); 4767 } else { 4768 pushptr(addr); 4769 } 4770 4771 // Pass the oop (loaded from addr) and a describing message to verify_oop_subroutine 4772 const char* b = nullptr; 4773 { 4774 ResourceMark rm; 4775 stringStream ss; 4776 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 4777 b = code_string(ss.as_string()); 4778 } 4779 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 4780 pushptr(buffer.addr(), rscratch1); 4781 4782 // call indirectly to solve generation ordering problem 4783 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4784 call(rax); 4785 // Caller pops the arguments (addr, message) and restores rax, r10.
4786 } 4787 4788 void MacroAssembler::verify_tlab() { 4789 #ifdef ASSERT 4790 if (UseTLAB && VerifyOops) { 4791 Label next, ok; 4792 Register t1 = rsi; 4793 4794 push(t1); 4795 4796 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4797 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset()))); 4798 jcc(Assembler::aboveEqual, next); 4799 STOP("assert(top >= start)"); 4800 should_not_reach_here(); 4801 4802 bind(next); 4803 movptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); 4804 cmpptr(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); 4805 jcc(Assembler::aboveEqual, ok); 4806 STOP("assert(top <= end)"); 4807 should_not_reach_here(); 4808 4809 bind(ok); 4810 pop(t1); 4811 } 4812 #endif 4813 } 4814 4815 class ControlWord { 4816 public: 4817 int32_t _value; 4818 4819 int rounding_control() const { return (_value >> 10) & 3 ; } 4820 int precision_control() const { return (_value >> 8) & 3 ; } 4821 bool precision() const { return ((_value >> 5) & 1) != 0; } 4822 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4823 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4824 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4825 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4826 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4827 4828 void print() const { 4829 // rounding control 4830 const char* rc; 4831 switch (rounding_control()) { 4832 case 0: rc = "round near"; break; 4833 case 1: rc = "round down"; break; 4834 case 2: rc = "round up "; break; 4835 case 3: rc = "chop "; break; 4836 default: 4837 rc = nullptr; // silence compiler warnings 4838 fatal("Unknown rounding control: %d", rounding_control()); 4839 }; 4840 // precision control 4841 const char* pc; 4842 switch (precision_control()) { 4843 case 0: pc = "24 bits "; break; 4844 case 1: pc = "reserved"; break; 4845 case 2: pc = "53 bits "; break; 4846 case 3: pc = "64 bits "; break; 4847 default: 4848 pc = nullptr; // silence compiler warnings 4849 fatal("Unknown precision control: %d", precision_control()); 4850 }; 4851 // flags 4852 char f[9]; 4853 f[0] = ' '; 4854 f[1] = ' '; 4855 f[2] = (precision ()) ? 'P' : 'p'; 4856 f[3] = (underflow ()) ? 'U' : 'u'; 4857 f[4] = (overflow ()) ? 'O' : 'o'; 4858 f[5] = (zero_divide ()) ? 'Z' : 'z'; 4859 f[6] = (denormalized()) ? 'D' : 'd'; 4860 f[7] = (invalid ()) ? 
'I' : 'i'; 4861 f[8] = '\x0'; 4862 // output 4863 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 4864 } 4865 4866 }; 4867 4868 class StatusWord { 4869 public: 4870 int32_t _value; 4871 4872 bool busy() const { return ((_value >> 15) & 1) != 0; } 4873 bool C3() const { return ((_value >> 14) & 1) != 0; } 4874 bool C2() const { return ((_value >> 10) & 1) != 0; } 4875 bool C1() const { return ((_value >> 9) & 1) != 0; } 4876 bool C0() const { return ((_value >> 8) & 1) != 0; } 4877 int top() const { return (_value >> 11) & 7 ; } 4878 bool error_status() const { return ((_value >> 7) & 1) != 0; } 4879 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 4880 bool precision() const { return ((_value >> 5) & 1) != 0; } 4881 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4882 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4883 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4884 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4885 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4886 4887 void print() const { 4888 // condition codes 4889 char c[5]; 4890 c[0] = (C3()) ? '3' : '-'; 4891 c[1] = (C2()) ? '2' : '-'; 4892 c[2] = (C1()) ? '1' : '-'; 4893 c[3] = (C0()) ? '0' : '-'; 4894 c[4] = '\x0'; 4895 // flags 4896 char f[9]; 4897 f[0] = (error_status()) ? 'E' : '-'; 4898 f[1] = (stack_fault ()) ? 'S' : '-'; 4899 f[2] = (precision ()) ? 'P' : '-'; 4900 f[3] = (underflow ()) ? 'U' : '-'; 4901 f[4] = (overflow ()) ? 'O' : '-'; 4902 f[5] = (zero_divide ()) ? 'Z' : '-'; 4903 f[6] = (denormalized()) ? 'D' : '-'; 4904 f[7] = (invalid ()) ? 'I' : '-'; 4905 f[8] = '\x0'; 4906 // output 4907 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 4908 } 4909 4910 }; 4911 4912 class TagWord { 4913 public: 4914 int32_t _value; 4915 4916 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 4917 4918 void print() const { 4919 printf("%04x", _value & 0xFFFF); 4920 } 4921 4922 }; 4923 4924 class FPU_Register { 4925 public: 4926 int32_t _m0; 4927 int32_t _m1; 4928 int16_t _ex; 4929 4930 bool is_indefinite() const { 4931 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 4932 } 4933 4934 void print() const { 4935 char sign = (_ex < 0) ? '-' : '+'; 4936 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 4937 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 4938 }; 4939 4940 }; 4941 4942 class FPU_State { 4943 public: 4944 enum { 4945 register_size = 10, 4946 number_of_registers = 8, 4947 register_mask = 7 4948 }; 4949 4950 ControlWord _control_word; 4951 StatusWord _status_word; 4952 TagWord _tag_word; 4953 int32_t _error_offset; 4954 int32_t _error_selector; 4955 int32_t _data_offset; 4956 int32_t _data_selector; 4957 int8_t _register[register_size * number_of_registers]; 4958 4959 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 4960 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 4961 4962 const char* tag_as_string(int tag) const { 4963 switch (tag) { 4964 case 0: return "valid"; 4965 case 1: return "zero"; 4966 case 2: return "special"; 4967 case 3: return "empty"; 4968 } 4969 ShouldNotReachHere(); 4970 return nullptr; 4971 } 4972 4973 void print() const { 4974 // print computation registers 4975 { int t = _status_word.top(); 4976 for (int i = 0; i < number_of_registers; i++) { 4977 int j = (i - t) & register_mask; 4978 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 4979 st(j)->print(); 4980 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 4981 } 4982 } 4983 printf("\n"); 4984 // print control registers 4985 printf("ctrl = "); _control_word.print(); printf("\n"); 4986 printf("stat = "); _status_word .print(); printf("\n"); 4987 printf("tags = "); _tag_word .print(); printf("\n"); 4988 } 4989 4990 }; 4991 4992 class Flag_Register { 4993 public: 4994 int32_t _value; 4995 4996 bool overflow() const { return ((_value >> 11) & 1) != 0; } 4997 bool direction() const { return ((_value >> 10) & 1) != 0; } 4998 bool sign() const { return ((_value >> 7) & 1) != 0; } 4999 bool zero() const { return ((_value >> 6) & 1) != 0; } 5000 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5001 bool parity() const { return ((_value >> 2) & 1) != 0; } 5002 bool carry() const { return ((_value >> 0) & 1) != 0; } 5003 5004 void print() const { 5005 // flags 5006 char f[8]; 5007 f[0] = (overflow ()) ? 'O' : '-'; 5008 f[1] = (direction ()) ? 'D' : '-'; 5009 f[2] = (sign ()) ? 'S' : '-'; 5010 f[3] = (zero ()) ? 'Z' : '-'; 5011 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5012 f[5] = (parity ()) ? 'P' : '-'; 5013 f[6] = (carry ()) ? 'C' : '-'; 5014 f[7] = '\x0'; 5015 // output 5016 printf("%08x flags = %s", _value, f); 5017 } 5018 5019 }; 5020 5021 class IU_Register { 5022 public: 5023 int32_t _value; 5024 5025 void print() const { 5026 printf("%08x %11d", _value, _value); 5027 } 5028 5029 }; 5030 5031 class IU_State { 5032 public: 5033 Flag_Register _eflags; 5034 IU_Register _rdi; 5035 IU_Register _rsi; 5036 IU_Register _rbp; 5037 IU_Register _rsp; 5038 IU_Register _rbx; 5039 IU_Register _rdx; 5040 IU_Register _rcx; 5041 IU_Register _rax; 5042 5043 void print() const { 5044 // computation registers 5045 printf("rax, = "); _rax.print(); printf("\n"); 5046 printf("rbx, = "); _rbx.print(); printf("\n"); 5047 printf("rcx = "); _rcx.print(); printf("\n"); 5048 printf("rdx = "); _rdx.print(); printf("\n"); 5049 printf("rdi = "); _rdi.print(); printf("\n"); 5050 printf("rsi = "); _rsi.print(); printf("\n"); 5051 printf("rbp, = "); _rbp.print(); printf("\n"); 5052 printf("rsp = "); _rsp.print(); printf("\n"); 5053 printf("\n"); 5054 // control registers 5055 printf("flgs = "); _eflags.print(); printf("\n"); 5056 } 5057 }; 5058 5059 5060 class CPU_State { 5061 public: 5062 FPU_State _fpu_state; 5063 IU_State _iu_state; 5064 5065 void print() const { 5066 printf("--------------------------------------------------\n"); 5067 _iu_state .print(); 5068 printf("\n"); 5069 _fpu_state.print(); 5070 printf("--------------------------------------------------\n"); 5071 } 5072 5073 }; 5074 5075 5076 static void _print_CPU_state(CPU_State* state) { 5077 state->print(); 5078 }; 5079 5080 5081 void MacroAssembler::print_CPU_state() { 5082 push_CPU_state(); 5083 push(rsp); // pass CPU state 5084 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5085 addptr(rsp, wordSize); // discard argument 5086 pop_CPU_state(); 5087 } 5088 5089 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5090 // Either restore the MXCSR register after returning from the JNI Call 5091 // or verify that it wasn't changed (with -Xcheck:jni flag). 
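  // Sketch of the intent, using the flags referenced below: with
  // -XX:+RestoreMXCSROnJNICalls the VM's standard MXCSR value is reloaded
  // unconditionally, whereas -Xcheck:jni (CheckJNICalls) only calls a stub
  // that verifies native code left MXCSR unchanged.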
5092 if (VM_Version::supports_sse()) { 5093 if (RestoreMXCSROnJNICalls) { 5094 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5095 } else if (CheckJNICalls) { 5096 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5097 } 5098 } 5099 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5100 vzeroupper(); 5101 } 5102 5103 // ((OopHandle)result).resolve(); 5104 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5105 assert_different_registers(result, tmp); 5106 5107 // Only 64 bit platforms support GCs that require a tmp register 5108 // Only IN_HEAP loads require a thread_tmp register 5109 // OopHandle::resolve is an indirection like jobject. 5110 access_load_at(T_OBJECT, IN_NATIVE, 5111 result, Address(result, 0), tmp); 5112 } 5113 5114 // ((WeakHandle)result).resolve(); 5115 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5116 assert_different_registers(rresult, rtmp); 5117 Label resolved; 5118 5119 // A null weak handle resolves to null. 5120 cmpptr(rresult, 0); 5121 jcc(Assembler::equal, resolved); 5122 5123 // Only 64 bit platforms support GCs that require a tmp register 5124 // Only IN_HEAP loads require a thread_tmp register 5125 // WeakHandle::resolve is an indirection like jweak. 5126 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5127 rresult, Address(rresult, 0), rtmp); 5128 bind(resolved); 5129 } 5130 5131 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5132 // get mirror 5133 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5134 load_method_holder(mirror, method); 5135 movptr(mirror, Address(mirror, mirror_offset)); 5136 resolve_oop_handle(mirror, tmp); 5137 } 5138 5139 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5140 load_method_holder(rresult, rmethod); 5141 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5142 } 5143 5144 void MacroAssembler::load_method_holder(Register holder, Register method) { 5145 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5146 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5147 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5148 } 5149 5150 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { 5151 assert(UseCompactObjectHeaders, "expect compact object headers"); 5152 movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5153 shrq(dst, markWord::klass_shift); 5154 } 5155 5156 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5157 assert_different_registers(src, tmp); 5158 assert_different_registers(dst, tmp); 5159 5160 if (UseCompactObjectHeaders) { 5161 load_narrow_klass_compact(dst, src); 5162 decode_klass_not_null(dst, tmp); 5163 } else if (UseCompressedClassPointers) { 5164 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5165 decode_klass_not_null(dst, tmp); 5166 } else { 5167 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5168 } 5169 } 5170 5171 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5172 assert(!UseCompactObjectHeaders, "not with compact headers"); 5173 assert_different_registers(src, tmp); 5174 assert_different_registers(dst, tmp); 5175 if (UseCompressedClassPointers) { 5176 encode_klass_not_null(src, tmp); 5177 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5178 } else { 5179 
movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5180 } 5181 } 5182 5183 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { 5184 if (UseCompactObjectHeaders) { 5185 assert(tmp != noreg, "need tmp"); 5186 assert_different_registers(klass, obj, tmp); 5187 load_narrow_klass_compact(tmp, obj); 5188 cmpl(klass, tmp); 5189 } else if (UseCompressedClassPointers) { 5190 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5191 } else { 5192 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5193 } 5194 } 5195 5196 void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { 5197 if (UseCompactObjectHeaders) { 5198 assert(tmp2 != noreg, "need tmp2"); 5199 assert_different_registers(obj1, obj2, tmp1, tmp2); 5200 load_narrow_klass_compact(tmp1, obj1); 5201 load_narrow_klass_compact(tmp2, obj2); 5202 cmpl(tmp1, tmp2); 5203 } else if (UseCompressedClassPointers) { 5204 movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5205 cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5206 } else { 5207 movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); 5208 cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); 5209 } 5210 } 5211 5212 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5213 Register tmp1) { 5214 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5215 decorators = AccessInternal::decorator_fixup(decorators, type); 5216 bool as_raw = (decorators & AS_RAW) != 0; 5217 if (as_raw) { 5218 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1); 5219 } else { 5220 bs->load_at(this, decorators, type, dst, src, tmp1); 5221 } 5222 } 5223 5224 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5225 Register tmp1, Register tmp2, Register tmp3) { 5226 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5227 decorators = AccessInternal::decorator_fixup(decorators, type); 5228 bool as_raw = (decorators & AS_RAW) != 0; 5229 if (as_raw) { 5230 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5231 } else { 5232 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5233 } 5234 } 5235 5236 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5237 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1); 5238 } 5239 5240 // Doesn't do verification, generates fixed size code 5241 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, DecoratorSet decorators) { 5242 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1); 5243 } 5244 5245 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5246 Register tmp2, Register tmp3, DecoratorSet decorators) { 5247 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5248 } 5249 5250 // Used for storing nulls. 
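// (Passing noreg as the value makes the barrier-set assembler store a null
//  instead of a register, so no separate value register is needed here.)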
5251 void MacroAssembler::store_heap_oop_null(Address dst) { 5252 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5253 } 5254 5255 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5256 assert(!UseCompactObjectHeaders, "Don't use with compact headers"); 5257 if (UseCompressedClassPointers) { 5258 // Store to klass gap in destination 5259 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5260 } 5261 } 5262 5263 #ifdef ASSERT 5264 void MacroAssembler::verify_heapbase(const char* msg) { 5265 assert (UseCompressedOops, "should be compressed"); 5266 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5267 if (CheckCompressedOops) { 5268 Label ok; 5269 ExternalAddress src2(CompressedOops::base_addr()); 5270 const bool is_src2_reachable = reachable(src2); 5271 if (!is_src2_reachable) { 5272 push(rscratch1); // cmpptr trashes rscratch1 5273 } 5274 cmpptr(r12_heapbase, src2, rscratch1); 5275 jcc(Assembler::equal, ok); 5276 STOP(msg); 5277 bind(ok); 5278 if (!is_src2_reachable) { 5279 pop(rscratch1); 5280 } 5281 } 5282 } 5283 #endif 5284 5285 // Algorithm must match oop.inline.hpp encode_heap_oop. 5286 void MacroAssembler::encode_heap_oop(Register r) { 5287 #ifdef ASSERT 5288 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5289 #endif 5290 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5291 if (CompressedOops::base() == nullptr) { 5292 if (CompressedOops::shift() != 0) { 5293 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5294 shrq(r, LogMinObjAlignmentInBytes); 5295 } 5296 return; 5297 } 5298 testq(r, r); 5299 cmovq(Assembler::equal, r, r12_heapbase); 5300 subq(r, r12_heapbase); 5301 shrq(r, LogMinObjAlignmentInBytes); 5302 } 5303 5304 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5305 #ifdef ASSERT 5306 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5307 if (CheckCompressedOops) { 5308 Label ok; 5309 testq(r, r); 5310 jcc(Assembler::notEqual, ok); 5311 STOP("null oop passed to encode_heap_oop_not_null"); 5312 bind(ok); 5313 } 5314 #endif 5315 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5316 if (CompressedOops::base() != nullptr) { 5317 subq(r, r12_heapbase); 5318 } 5319 if (CompressedOops::shift() != 0) { 5320 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5321 shrq(r, LogMinObjAlignmentInBytes); 5322 } 5323 } 5324 5325 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5326 #ifdef ASSERT 5327 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5328 if (CheckCompressedOops) { 5329 Label ok; 5330 testq(src, src); 5331 jcc(Assembler::notEqual, ok); 5332 STOP("null oop passed to encode_heap_oop_not_null2"); 5333 bind(ok); 5334 } 5335 #endif 5336 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5337 if (dst != src) { 5338 movq(dst, src); 5339 } 5340 if (CompressedOops::base() != nullptr) { 5341 subq(dst, r12_heapbase); 5342 } 5343 if (CompressedOops::shift() != 0) { 5344 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5345 shrq(dst, LogMinObjAlignmentInBytes); 5346 } 5347 } 5348 5349 void MacroAssembler::decode_heap_oop(Register r) { 5350 #ifdef ASSERT 5351 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5352 #endif 5353 if (CompressedOops::base() == nullptr) { 5354 if (CompressedOops::shift() != 0) { 5355 assert 
(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5356 shlq(r, LogMinObjAlignmentInBytes); 5357 } 5358 } else { 5359 Label done; 5360 shlq(r, LogMinObjAlignmentInBytes); 5361 jccb(Assembler::equal, done); 5362 addq(r, r12_heapbase); 5363 bind(done); 5364 } 5365 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5366 } 5367 5368 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5369 // Note: it will change flags 5370 assert (UseCompressedOops, "should only be used for compressed headers"); 5371 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5372 // Cannot assert, unverified entry point counts instructions (see .ad file) 5373 // vtableStubs also counts instructions in pd_code_size_limit. 5374 // Also do not verify_oop as this is called by verify_oop. 5375 if (CompressedOops::shift() != 0) { 5376 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5377 shlq(r, LogMinObjAlignmentInBytes); 5378 if (CompressedOops::base() != nullptr) { 5379 addq(r, r12_heapbase); 5380 } 5381 } else { 5382 assert (CompressedOops::base() == nullptr, "sanity"); 5383 } 5384 } 5385 5386 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5387 // Note: it will change flags 5388 assert (UseCompressedOops, "should only be used for compressed headers"); 5389 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5390 // Cannot assert, unverified entry point counts instructions (see .ad file) 5391 // vtableStubs also counts instructions in pd_code_size_limit. 5392 // Also do not verify_oop as this is called by verify_oop. 5393 if (CompressedOops::shift() != 0) { 5394 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5395 if (LogMinObjAlignmentInBytes == Address::times_8) { 5396 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5397 } else { 5398 if (dst != src) { 5399 movq(dst, src); 5400 } 5401 shlq(dst, LogMinObjAlignmentInBytes); 5402 if (CompressedOops::base() != nullptr) { 5403 addq(dst, r12_heapbase); 5404 } 5405 } 5406 } else { 5407 assert (CompressedOops::base() == nullptr, "sanity"); 5408 if (dst != src) { 5409 movq(dst, src); 5410 } 5411 } 5412 } 5413 5414 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5415 BLOCK_COMMENT("encode_klass_not_null {"); 5416 assert_different_registers(r, tmp); 5417 if (CompressedKlassPointers::base() != nullptr) { 5418 if (AOTCodeCache::is_on_for_dump()) { 5419 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5420 } else { 5421 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5422 } 5423 subq(r, tmp); 5424 } 5425 if (CompressedKlassPointers::shift() != 0) { 5426 shrq(r, CompressedKlassPointers::shift()); 5427 } 5428 BLOCK_COMMENT("} encode_klass_not_null"); 5429 } 5430 5431 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5432 BLOCK_COMMENT("encode_and_move_klass_not_null {"); 5433 assert_different_registers(src, dst); 5434 if (CompressedKlassPointers::base() != nullptr) { 5435 if (AOTCodeCache::is_on_for_dump()) { 5436 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5437 negq(dst); 5438 } else { 5439 movptr(dst, -(intptr_t)CompressedKlassPointers::base()); 5440 } 5441 addq(dst, src); 5442 } else { 5443 movptr(dst, src); 5444 } 5445 if (CompressedKlassPointers::shift() != 0) { 5446 shrq(dst, CompressedKlassPointers::shift()); 5447 } 5448 BLOCK_COMMENT("} encode_and_move_klass_not_null"); 5449 } 5450 5451 void 
MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5452 BLOCK_COMMENT("decode_klass_not_null {"); 5453 assert_different_registers(r, tmp); 5454 // Note: it will change flags 5455 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5456 // Cannot assert, unverified entry point counts instructions (see .ad file) 5457 // vtableStubs also counts instructions in pd_code_size_limit. 5458 // Also do not verify_oop as this is called by verify_oop. 5459 if (CompressedKlassPointers::shift() != 0) { 5460 shlq(r, CompressedKlassPointers::shift()); 5461 } 5462 if (CompressedKlassPointers::base() != nullptr) { 5463 if (AOTCodeCache::is_on_for_dump()) { 5464 movptr(tmp, ExternalAddress(CompressedKlassPointers::base_addr())); 5465 } else { 5466 movptr(tmp, (intptr_t)CompressedKlassPointers::base()); 5467 } 5468 addq(r, tmp); 5469 } 5470 BLOCK_COMMENT("} decode_klass_not_null"); 5471 } 5472 5473 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5474 BLOCK_COMMENT("decode_and_move_klass_not_null {"); 5475 assert_different_registers(src, dst); 5476 // Note: it will change flags 5477 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5478 // Cannot assert, unverified entry point counts instructions (see .ad file) 5479 // vtableStubs also counts instructions in pd_code_size_limit. 5480 // Also do not verify_oop as this is called by verify_oop. 5481 5482 if (CompressedKlassPointers::base() == nullptr && 5483 CompressedKlassPointers::shift() == 0) { 5484 // The best case scenario is that there is no base or shift. Then it is already 5485 // a pointer that needs nothing but a register rename. 5486 movptr(dst, src); 5487 } else { 5488 if (CompressedKlassPointers::shift() <= Address::times_8) { 5489 if (CompressedKlassPointers::base() != nullptr) { 5490 if (AOTCodeCache::is_on_for_dump()) { 5491 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5492 } else { 5493 movptr(dst, (intptr_t)CompressedKlassPointers::base()); 5494 } 5495 } else { 5496 xorq(dst, dst); 5497 } 5498 if (CompressedKlassPointers::shift() != 0) { 5499 assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?"); 5500 leaq(dst, Address(dst, src, Address::times_8, 0)); 5501 } else { 5502 addq(dst, src); 5503 } 5504 } else { 5505 if (CompressedKlassPointers::base() != nullptr) { 5506 if (AOTCodeCache::is_on_for_dump()) { 5507 movptr(dst, ExternalAddress(CompressedKlassPointers::base_addr())); 5508 shrq(dst, CompressedKlassPointers::shift()); 5509 } else { 5510 const intptr_t base_right_shifted = 5511 (intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5512 movptr(dst, base_right_shifted); 5513 } 5514 } else { 5515 xorq(dst, dst); 5516 } 5517 addq(dst, src); 5518 shlq(dst, CompressedKlassPointers::shift()); 5519 } 5520 } 5521 BLOCK_COMMENT("} decode_and_move_klass_not_null"); 5522 } 5523 5524 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5525 assert (UseCompressedOops, "should only be used for compressed headers"); 5526 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5527 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5528 int oop_index = oop_recorder()->find_index(obj); 5529 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5530 mov_narrow_oop(dst, oop_index, rspec); 5531 } 5532 5533 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5534 assert (UseCompressedOops, "should 
only be used for compressed headers"); 5535 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5536 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5537 int oop_index = oop_recorder()->find_index(obj); 5538 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5539 mov_narrow_oop(dst, oop_index, rspec); 5540 } 5541 5542 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5543 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5544 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5545 int klass_index = oop_recorder()->find_index(k); 5546 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5547 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5548 } 5549 5550 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 5551 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5552 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5553 int klass_index = oop_recorder()->find_index(k); 5554 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5555 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5556 } 5557 5558 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 5559 assert (UseCompressedOops, "should only be used for compressed headers"); 5560 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5561 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5562 int oop_index = oop_recorder()->find_index(obj); 5563 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5564 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5565 } 5566 5567 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 5568 assert (UseCompressedOops, "should only be used for compressed headers"); 5569 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5570 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5571 int oop_index = oop_recorder()->find_index(obj); 5572 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5573 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5574 } 5575 5576 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 5577 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5578 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5579 int klass_index = oop_recorder()->find_index(k); 5580 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5581 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5582 } 5583 5584 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 5585 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5586 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5587 int klass_index = oop_recorder()->find_index(k); 5588 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5589 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5590 } 5591 5592 void MacroAssembler::reinit_heapbase() { 5593 if (UseCompressedOops) { 5594 if (Universe::heap() != nullptr) { // GC was initialized 5595 if (CompressedOops::base() == nullptr) { 5596 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 5597 } else if (AOTCodeCache::is_on_for_dump()) { 5598 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 5599 } else { 5600 mov64(r12_heapbase, 
(int64_t)CompressedOops::base()); 5601 } 5602 } else { 5603 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 5604 } 5605 } 5606 } 5607 5608 #if COMPILER2_OR_JVMCI 5609 5610 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 5611 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5612 // cnt - number of qwords (8-byte words). 5613 // base - start address, qword aligned. 5614 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 5615 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 5616 if (use64byteVector) { 5617 vpxor(xtmp, xtmp, xtmp, AVX_512bit); 5618 } else if (MaxVectorSize >= 32) { 5619 vpxor(xtmp, xtmp, xtmp, AVX_256bit); 5620 } else { 5621 pxor(xtmp, xtmp); 5622 } 5623 jmp(L_zero_64_bytes); 5624 5625 BIND(L_loop); 5626 if (MaxVectorSize >= 32) { 5627 fill64(base, 0, xtmp, use64byteVector); 5628 } else { 5629 movdqu(Address(base, 0), xtmp); 5630 movdqu(Address(base, 16), xtmp); 5631 movdqu(Address(base, 32), xtmp); 5632 movdqu(Address(base, 48), xtmp); 5633 } 5634 addptr(base, 64); 5635 5636 BIND(L_zero_64_bytes); 5637 subptr(cnt, 8); 5638 jccb(Assembler::greaterEqual, L_loop); 5639 5640 // Copy trailing 64 bytes 5641 if (use64byteVector) { 5642 addptr(cnt, 8); 5643 jccb(Assembler::equal, L_end); 5644 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true); 5645 jmp(L_end); 5646 } else { 5647 addptr(cnt, 4); 5648 jccb(Assembler::less, L_tail); 5649 if (MaxVectorSize >= 32) { 5650 vmovdqu(Address(base, 0), xtmp); 5651 } else { 5652 movdqu(Address(base, 0), xtmp); 5653 movdqu(Address(base, 16), xtmp); 5654 } 5655 } 5656 addptr(base, 32); 5657 subptr(cnt, 4); 5658 5659 BIND(L_tail); 5660 addptr(cnt, 4); 5661 jccb(Assembler::lessEqual, L_end); 5662 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 5663 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp); 5664 } else { 5665 decrement(cnt); 5666 5667 BIND(L_sloop); 5668 movq(Address(base, 0), xtmp); 5669 addptr(base, 8); 5670 decrement(cnt); 5671 jccb(Assembler::greaterEqual, L_sloop); 5672 } 5673 BIND(L_end); 5674 } 5675 5676 // Clearing constant sized memory using YMM/ZMM registers. 5677 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5678 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); 5679 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 5680 5681 int vector64_count = (cnt & (~0x7)) >> 3; 5682 cnt = cnt & 0x7; 5683 const int fill64_per_loop = 4; 5684 const int max_unrolled_fill64 = 8; 5685 5686 // 64 byte initialization loop. 5687 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 5688 int start64 = 0; 5689 if (vector64_count > max_unrolled_fill64) { 5690 Label LOOP; 5691 Register index = rtmp; 5692 5693 start64 = vector64_count - (vector64_count % fill64_per_loop); 5694 5695 movl(index, 0); 5696 BIND(LOOP); 5697 for (int i = 0; i < fill64_per_loop; i++) { 5698 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 5699 } 5700 addl(index, fill64_per_loop * 64); 5701 cmpl(index, start64 * 64); 5702 jccb(Assembler::less, LOOP); 5703 } 5704 for (int i = start64; i < vector64_count; i++) { 5705 fill64(base, i * 64, xtmp, use64byteVector); 5706 } 5707 5708 // Clear remaining 64 byte tail. 
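  // The 1..7 qwords left over after the full 64-byte stores are zeroed with
  // the narrowest stores that cover them; counts that do not map onto a plain
  // 128/256-bit store use a masked evmovdqu whose opmask has one bit per
  // remaining qword lane (0x7, 0x1F, 0x7F below).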
5709 int disp = vector64_count * 64; 5710 if (cnt) { 5711 switch (cnt) { 5712 case 1: 5713 movq(Address(base, disp), xtmp); 5714 break; 5715 case 2: 5716 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 5717 break; 5718 case 3: 5719 movl(rtmp, 0x7); 5720 kmovwl(mask, rtmp); 5721 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 5722 break; 5723 case 4: 5724 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5725 break; 5726 case 5: 5727 if (use64byteVector) { 5728 movl(rtmp, 0x1F); 5729 kmovwl(mask, rtmp); 5730 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5731 } else { 5732 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5733 movq(Address(base, disp + 32), xtmp); 5734 } 5735 break; 5736 case 6: 5737 if (use64byteVector) { 5738 movl(rtmp, 0x3F); 5739 kmovwl(mask, rtmp); 5740 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5741 } else { 5742 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5743 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 5744 } 5745 break; 5746 case 7: 5747 if (use64byteVector) { 5748 movl(rtmp, 0x7F); 5749 kmovwl(mask, rtmp); 5750 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5751 } else { 5752 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5753 movl(rtmp, 0x7); 5754 kmovwl(mask, rtmp); 5755 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 5756 } 5757 break; 5758 default: 5759 fatal("Unexpected length : %d\n",cnt); 5760 break; 5761 } 5762 } 5763 } 5764 5765 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, 5766 bool is_large, KRegister mask) { 5767 // cnt - number of qwords (8-byte words). 5768 // base - start address, qword aligned. 
5769 // is_large - if optimizers know cnt is larger than InitArrayShortSize 5770 assert(base==rdi, "base register must be edi for rep stos"); 5771 assert(tmp==rax, "tmp register must be eax for rep stos"); 5772 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 5773 assert(InitArrayShortSize % BytesPerLong == 0, 5774 "InitArrayShortSize should be the multiple of BytesPerLong"); 5775 5776 Label DONE; 5777 if (!is_large || !UseXMMForObjInit) { 5778 xorptr(tmp, tmp); 5779 } 5780 5781 if (!is_large) { 5782 Label LOOP, LONG; 5783 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 5784 jccb(Assembler::greater, LONG); 5785 5786 decrement(cnt); 5787 jccb(Assembler::negative, DONE); // Zero length 5788 5789 // Use individual pointer-sized stores for small counts: 5790 BIND(LOOP); 5791 movptr(Address(base, cnt, Address::times_ptr), tmp); 5792 decrement(cnt); 5793 jccb(Assembler::greaterEqual, LOOP); 5794 jmpb(DONE); 5795 5796 BIND(LONG); 5797 } 5798 5799 // Use longer rep-prefixed ops for non-small counts: 5800 if (UseFastStosb) { 5801 shlptr(cnt, 3); // convert to number of bytes 5802 rep_stosb(); 5803 } else if (UseXMMForObjInit) { 5804 xmm_clear_mem(base, cnt, tmp, xtmp, mask); 5805 } else { 5806 rep_stos(); 5807 } 5808 5809 BIND(DONE); 5810 } 5811 5812 #endif //COMPILER2_OR_JVMCI 5813 5814 5815 void MacroAssembler::generate_fill(BasicType t, bool aligned, 5816 Register to, Register value, Register count, 5817 Register rtmp, XMMRegister xtmp) { 5818 ShortBranchVerifier sbv(this); 5819 assert_different_registers(to, value, count, rtmp); 5820 Label L_exit; 5821 Label L_fill_2_bytes, L_fill_4_bytes; 5822 5823 #if defined(COMPILER2) 5824 if(MaxVectorSize >=32 && 5825 VM_Version::supports_avx512vlbw() && 5826 VM_Version::supports_bmi2()) { 5827 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 5828 return; 5829 } 5830 #endif 5831 5832 int shift = -1; 5833 switch (t) { 5834 case T_BYTE: 5835 shift = 2; 5836 break; 5837 case T_SHORT: 5838 shift = 1; 5839 break; 5840 case T_INT: 5841 shift = 0; 5842 break; 5843 default: ShouldNotReachHere(); 5844 } 5845 5846 if (t == T_BYTE) { 5847 andl(value, 0xff); 5848 movl(rtmp, value); 5849 shll(rtmp, 8); 5850 orl(value, rtmp); 5851 } 5852 if (t == T_SHORT) { 5853 andl(value, 0xffff); 5854 } 5855 if (t == T_BYTE || t == T_SHORT) { 5856 movl(rtmp, value); 5857 shll(rtmp, 16); 5858 orl(value, rtmp); 5859 } 5860 5861 cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 5862 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 5863 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 5864 Label L_skip_align2; 5865 // align source address at 4 bytes address boundary 5866 if (t == T_BYTE) { 5867 Label L_skip_align1; 5868 // One byte misalignment happens only for byte arrays 5869 testptr(to, 1); 5870 jccb(Assembler::zero, L_skip_align1); 5871 movb(Address(to, 0), value); 5872 increment(to); 5873 decrement(count); 5874 BIND(L_skip_align1); 5875 } 5876 // Two bytes misalignment happens only for byte and short (char) arrays 5877 testptr(to, 2); 5878 jccb(Assembler::zero, L_skip_align2); 5879 movw(Address(to, 0), value); 5880 addptr(to, 2); 5881 subptr(count, 1<<(shift-1)); 5882 BIND(L_skip_align2); 5883 } 5884 { 5885 Label L_fill_32_bytes; 5886 if (!UseUnalignedLoadStores) { 5887 // align to 8 bytes, we know we are 4 byte aligned to start 5888 testptr(to, 4); 5889 jccb(Assembler::zero, L_fill_32_bytes); 5890 movl(Address(to, 0), value); 5891 addptr(to, 4); 5892 subptr(count, 1<<shift); 5893 } 5894 BIND(L_fill_32_bytes); 5895 
{ 5896 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 5897 movdl(xtmp, value); 5898 if (UseAVX >= 2 && UseUnalignedLoadStores) { 5899 Label L_check_fill_32_bytes; 5900 if (UseAVX > 2) { 5901 // Fill 64-byte chunks 5902 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 5903 5904 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 5905 cmpptr(count, VM_Version::avx3_threshold()); 5906 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 5907 5908 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 5909 5910 subptr(count, 16 << shift); 5911 jccb(Assembler::less, L_check_fill_32_bytes); 5912 align(16); 5913 5914 BIND(L_fill_64_bytes_loop_avx3); 5915 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 5916 addptr(to, 64); 5917 subptr(count, 16 << shift); 5918 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 5919 jmpb(L_check_fill_32_bytes); 5920 5921 BIND(L_check_fill_64_bytes_avx2); 5922 } 5923 // Fill 64-byte chunks 5924 Label L_fill_64_bytes_loop; 5925 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 5926 5927 subptr(count, 16 << shift); 5928 jcc(Assembler::less, L_check_fill_32_bytes); 5929 align(16); 5930 5931 BIND(L_fill_64_bytes_loop); 5932 vmovdqu(Address(to, 0), xtmp); 5933 vmovdqu(Address(to, 32), xtmp); 5934 addptr(to, 64); 5935 subptr(count, 16 << shift); 5936 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 5937 5938 BIND(L_check_fill_32_bytes); 5939 addptr(count, 8 << shift); 5940 jccb(Assembler::less, L_check_fill_8_bytes); 5941 vmovdqu(Address(to, 0), xtmp); 5942 addptr(to, 32); 5943 subptr(count, 8 << shift); 5944 5945 BIND(L_check_fill_8_bytes); 5946 // clean upper bits of YMM registers 5947 movdl(xtmp, value); 5948 pshufd(xtmp, xtmp, 0); 5949 } else { 5950 // Fill 32-byte chunks 5951 pshufd(xtmp, xtmp, 0); 5952 5953 subptr(count, 8 << shift); 5954 jcc(Assembler::less, L_check_fill_8_bytes); 5955 align(16); 5956 5957 BIND(L_fill_32_bytes_loop); 5958 5959 if (UseUnalignedLoadStores) { 5960 movdqu(Address(to, 0), xtmp); 5961 movdqu(Address(to, 16), xtmp); 5962 } else { 5963 movq(Address(to, 0), xtmp); 5964 movq(Address(to, 8), xtmp); 5965 movq(Address(to, 16), xtmp); 5966 movq(Address(to, 24), xtmp); 5967 } 5968 5969 addptr(to, 32); 5970 subptr(count, 8 << shift); 5971 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 5972 5973 BIND(L_check_fill_8_bytes); 5974 } 5975 addptr(count, 8 << shift); 5976 jccb(Assembler::zero, L_exit); 5977 jmpb(L_fill_8_bytes); 5978 5979 // 5980 // length is too short, just fill qwords 5981 // 5982 BIND(L_fill_8_bytes_loop); 5983 movq(Address(to, 0), xtmp); 5984 addptr(to, 8); 5985 BIND(L_fill_8_bytes); 5986 subptr(count, 1 << (shift + 1)); 5987 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 5988 } 5989 } 5990 // fill trailing 4 bytes 5991 BIND(L_fill_4_bytes); 5992 testl(count, 1<<shift); 5993 jccb(Assembler::zero, L_fill_2_bytes); 5994 movl(Address(to, 0), value); 5995 if (t == T_BYTE || t == T_SHORT) { 5996 Label L_fill_byte; 5997 addptr(to, 4); 5998 BIND(L_fill_2_bytes); 5999 // fill trailing 2 bytes 6000 testl(count, 1<<(shift-1)); 6001 jccb(Assembler::zero, L_fill_byte); 6002 movw(Address(to, 0), value); 6003 if (t == T_BYTE) { 6004 addptr(to, 2); 6005 BIND(L_fill_byte); 6006 // fill trailing byte 6007 testl(count, 1); 6008 jccb(Assembler::zero, L_exit); 6009 movb(Address(to, 0), value); 6010 } else { 6011 BIND(L_fill_byte); 6012 } 6013 } else { 6014 BIND(L_fill_2_bytes); 6015 } 6016 BIND(L_exit); 6017 } 6018 6019 void 
MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 6020 switch(type) { 6021 case T_BYTE: 6022 case T_BOOLEAN: 6023 evpbroadcastb(dst, src, vector_len); 6024 break; 6025 case T_SHORT: 6026 case T_CHAR: 6027 evpbroadcastw(dst, src, vector_len); 6028 break; 6029 case T_INT: 6030 case T_FLOAT: 6031 evpbroadcastd(dst, src, vector_len); 6032 break; 6033 case T_LONG: 6034 case T_DOUBLE: 6035 evpbroadcastq(dst, src, vector_len); 6036 break; 6037 default: 6038 fatal("Unhandled type : %s", type2name(type)); 6039 break; 6040 } 6041 } 6042 6043 // encode char[] to byte[] in ISO_8859_1 or ASCII 6044 //@IntrinsicCandidate 6045 //private static int implEncodeISOArray(byte[] sa, int sp, 6046 //byte[] da, int dp, int len) { 6047 // int i = 0; 6048 // for (; i < len; i++) { 6049 // char c = StringUTF16.getChar(sa, sp++); 6050 // if (c > '\u00FF') 6051 // break; 6052 // da[dp++] = (byte)c; 6053 // } 6054 // return i; 6055 //} 6056 // 6057 //@IntrinsicCandidate 6058 //private static int implEncodeAsciiArray(char[] sa, int sp, 6059 // byte[] da, int dp, int len) { 6060 // int i = 0; 6061 // for (; i < len; i++) { 6062 // char c = sa[sp++]; 6063 // if (c >= '\u0080') 6064 // break; 6065 // da[dp++] = (byte)c; 6066 // } 6067 // return i; 6068 //} 6069 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6070 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6071 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6072 Register tmp5, Register result, bool ascii) { 6073 6074 // rsi: src 6075 // rdi: dst 6076 // rdx: len 6077 // rcx: tmp5 6078 // rax: result 6079 ShortBranchVerifier sbv(this); 6080 assert_different_registers(src, dst, len, tmp5, result); 6081 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6082 6083 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6084 int short_mask = ascii ? 
0xff80 : 0xff00; 6085 6086 // set result 6087 xorl(result, result); 6088 // check for zero length 6089 testl(len, len); 6090 jcc(Assembler::zero, L_done); 6091 6092 movl(result, len); 6093 6094 // Setup pointers 6095 lea(src, Address(src, len, Address::times_2)); // char[] 6096 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6097 negptr(len); 6098 6099 if (UseSSE42Intrinsics || UseAVX >= 2) { 6100 Label L_copy_8_chars, L_copy_8_chars_exit; 6101 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 6102 6103 if (UseAVX >= 2) { 6104 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 6105 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6106 movdl(tmp1Reg, tmp5); 6107 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 6108 jmp(L_chars_32_check); 6109 6110 bind(L_copy_32_chars); 6111 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 6112 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 6113 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6114 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6115 jccb(Assembler::notZero, L_copy_32_chars_exit); 6116 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6117 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 6118 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 6119 6120 bind(L_chars_32_check); 6121 addptr(len, 32); 6122 jcc(Assembler::lessEqual, L_copy_32_chars); 6123 6124 bind(L_copy_32_chars_exit); 6125 subptr(len, 16); 6126 jccb(Assembler::greater, L_copy_16_chars_exit); 6127 6128 } else if (UseSSE42Intrinsics) { 6129 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6130 movdl(tmp1Reg, tmp5); 6131 pshufd(tmp1Reg, tmp1Reg, 0); 6132 jmpb(L_chars_16_check); 6133 } 6134 6135 bind(L_copy_16_chars); 6136 if (UseAVX >= 2) { 6137 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 6138 vptest(tmp2Reg, tmp1Reg); 6139 jcc(Assembler::notZero, L_copy_16_chars_exit); 6140 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 6141 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 6142 } else { 6143 if (UseAVX > 0) { 6144 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6145 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6146 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 6147 } else { 6148 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6149 por(tmp2Reg, tmp3Reg); 6150 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6151 por(tmp2Reg, tmp4Reg); 6152 } 6153 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6154 jccb(Assembler::notZero, L_copy_16_chars_exit); 6155 packuswb(tmp3Reg, tmp4Reg); 6156 } 6157 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 6158 6159 bind(L_chars_16_check); 6160 addptr(len, 16); 6161 jcc(Assembler::lessEqual, L_copy_16_chars); 6162 6163 bind(L_copy_16_chars_exit); 6164 if (UseAVX >= 2) { 6165 // clean upper bits of YMM registers 6166 vpxor(tmp2Reg, tmp2Reg); 6167 vpxor(tmp3Reg, tmp3Reg); 6168 vpxor(tmp4Reg, tmp4Reg); 6169 movdl(tmp1Reg, tmp5); 6170 pshufd(tmp1Reg, tmp1Reg, 0); 6171 } 6172 subptr(len, 8); 6173 jccb(Assembler::greater, L_copy_8_chars_exit); 6174 6175 bind(L_copy_8_chars); 6176 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 6177 ptest(tmp3Reg, tmp1Reg); 6178 jccb(Assembler::notZero, L_copy_8_chars_exit); 6179 packuswb(tmp3Reg, tmp1Reg); 6180 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 6181 addptr(len, 8); 6182 
jccb(Assembler::lessEqual, L_copy_8_chars); 6183 6184 bind(L_copy_8_chars_exit); 6185 subptr(len, 8); 6186 jccb(Assembler::zero, L_done); 6187 } 6188 6189 bind(L_copy_1_char); 6190 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 6191 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 6192 jccb(Assembler::notZero, L_copy_1_char_exit); 6193 movb(Address(dst, len, Address::times_1, 0), tmp5); 6194 addptr(len, 1); 6195 jccb(Assembler::less, L_copy_1_char); 6196 6197 bind(L_copy_1_char_exit); 6198 addptr(result, len); // len is negative count of not processed elements 6199 6200 bind(L_done); 6201 } 6202 6203 /** 6204 * Helper for multiply_to_len(). 6205 */ 6206 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 6207 addq(dest_lo, src1); 6208 adcq(dest_hi, 0); 6209 addq(dest_lo, src2); 6210 adcq(dest_hi, 0); 6211 } 6212 6213 /** 6214 * Multiply 64 bit by 64 bit first loop. 6215 */ 6216 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 6217 Register y, Register y_idx, Register z, 6218 Register carry, Register product, 6219 Register idx, Register kdx) { 6220 // 6221 // jlong carry, x[], y[], z[]; 6222 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6223 // huge_128 product = y[idx] * x[xstart] + carry; 6224 // z[kdx] = (jlong)product; 6225 // carry = (jlong)(product >>> 64); 6226 // } 6227 // z[xstart] = carry; 6228 // 6229 6230 Label L_first_loop, L_first_loop_exit; 6231 Label L_one_x, L_one_y, L_multiply; 6232 6233 decrementl(xstart); 6234 jcc(Assembler::negative, L_one_x); 6235 6236 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6237 rorq(x_xstart, 32); // convert big-endian to little-endian 6238 6239 bind(L_first_loop); 6240 decrementl(idx); 6241 jcc(Assembler::negative, L_first_loop_exit); 6242 decrementl(idx); 6243 jcc(Assembler::negative, L_one_y); 6244 movq(y_idx, Address(y, idx, Address::times_4, 0)); 6245 rorq(y_idx, 32); // convert big-endian to little-endian 6246 bind(L_multiply); 6247 movq(product, x_xstart); 6248 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 6249 addq(product, carry); 6250 adcq(rdx, 0); 6251 subl(kdx, 2); 6252 movl(Address(z, kdx, Address::times_4, 4), product); 6253 shrq(product, 32); 6254 movl(Address(z, kdx, Address::times_4, 0), product); 6255 movq(carry, rdx); 6256 jmp(L_first_loop); 6257 6258 bind(L_one_y); 6259 movl(y_idx, Address(y, 0)); 6260 jmp(L_multiply); 6261 6262 bind(L_one_x); 6263 movl(x_xstart, Address(x, 0)); 6264 jmp(L_first_loop); 6265 6266 bind(L_first_loop_exit); 6267 } 6268 6269 /** 6270 * Multiply 64 bit by 64 bit and add 128 bit. 
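 *
 * In terms of the surrounding loop: rdx:rax <- x_xstart * y[idx], then z[idx]
 * and carry are added into that 128-bit product with add2_with_carry; the low
 * 64 bits are written back to z[idx] (as two 32-bit words) and the high half
 * is left in rdx for the caller to pick up as the next carry.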
6271 */ 6272 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6273 Register yz_idx, Register idx, 6274 Register carry, Register product, int offset) { 6275 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6276 // z[kdx] = (jlong)product; 6277 6278 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6279 rorq(yz_idx, 32); // convert big-endian to little-endian 6280 movq(product, x_xstart); 6281 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6282 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6283 rorq(yz_idx, 32); // convert big-endian to little-endian 6284 6285 add2_with_carry(rdx, product, carry, yz_idx); 6286 6287 movl(Address(z, idx, Address::times_4, offset+4), product); 6288 shrq(product, 32); 6289 movl(Address(z, idx, Address::times_4, offset), product); 6290 6291 } 6292 6293 /** 6294 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6295 */ 6296 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6297 Register yz_idx, Register idx, Register jdx, 6298 Register carry, Register product, 6299 Register carry2) { 6300 // jlong carry, x[], y[], z[]; 6301 // int kdx = ystart+1; 6302 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6303 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6304 // z[kdx+idx+1] = (jlong)product; 6305 // jlong carry2 = (jlong)(product >>> 64); 6306 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6307 // z[kdx+idx] = (jlong)product; 6308 // carry = (jlong)(product >>> 64); 6309 // } 6310 // idx += 2; 6311 // if (idx > 0) { 6312 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6313 // z[kdx+idx] = (jlong)product; 6314 // carry = (jlong)(product >>> 64); 6315 // } 6316 // 6317 6318 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6319 6320 movl(jdx, idx); 6321 andl(jdx, 0xFFFFFFFC); 6322 shrl(jdx, 2); 6323 6324 bind(L_third_loop); 6325 subl(jdx, 1); 6326 jcc(Assembler::negative, L_third_loop_exit); 6327 subl(idx, 4); 6328 6329 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6330 movq(carry2, rdx); 6331 6332 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6333 movq(carry, rdx); 6334 jmp(L_third_loop); 6335 6336 bind (L_third_loop_exit); 6337 6338 andl (idx, 0x3); 6339 jcc(Assembler::zero, L_post_third_loop_done); 6340 6341 Label L_check_1; 6342 subl(idx, 2); 6343 jcc(Assembler::negative, L_check_1); 6344 6345 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6346 movq(carry, rdx); 6347 6348 bind (L_check_1); 6349 addl (idx, 0x2); 6350 andl (idx, 0x1); 6351 subl(idx, 1); 6352 jcc(Assembler::negative, L_post_third_loop_done); 6353 6354 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6355 movq(product, x_xstart); 6356 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6357 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6358 6359 add2_with_carry(rdx, product, yz_idx, carry); 6360 6361 movl(Address(z, idx, Address::times_4, 0), product); 6362 shrq(product, 32); 6363 6364 shlq(rdx, 32); 6365 orq(product, rdx); 6366 movq(carry, product); 6367 6368 bind(L_post_third_loop_done); 6369 } 6370 6371 /** 6372 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
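 *
 * mulxq produces a full 64x64->128 product without touching the flags, so
 * when ADX is available two independent carry chains can be kept live at
 * once: adcxq accumulates through CF and adoxq through OF (see the body
 * below); without ADX the code falls back to add2_with_carry.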
6373 * 6374 */ 6375 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6376 Register carry, Register carry2, 6377 Register idx, Register jdx, 6378 Register yz_idx1, Register yz_idx2, 6379 Register tmp, Register tmp3, Register tmp4) { 6380 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6381 6382 // jlong carry, x[], y[], z[]; 6383 // int kdx = ystart+1; 6384 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6385 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6386 // jlong carry2 = (jlong)(tmp3 >>> 64); 6387 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6388 // carry = (jlong)(tmp4 >>> 64); 6389 // z[kdx+idx+1] = (jlong)tmp3; 6390 // z[kdx+idx] = (jlong)tmp4; 6391 // } 6392 // idx += 2; 6393 // if (idx > 0) { 6394 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6395 // z[kdx+idx] = (jlong)yz_idx1; 6396 // carry = (jlong)(yz_idx1 >>> 64); 6397 // } 6398 // 6399 6400 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6401 6402 movl(jdx, idx); 6403 andl(jdx, 0xFFFFFFFC); 6404 shrl(jdx, 2); 6405 6406 bind(L_third_loop); 6407 subl(jdx, 1); 6408 jcc(Assembler::negative, L_third_loop_exit); 6409 subl(idx, 4); 6410 6411 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6412 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6413 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6414 rorxq(yz_idx2, yz_idx2, 32); 6415 6416 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6417 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6418 6419 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6420 rorxq(yz_idx1, yz_idx1, 32); 6421 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6422 rorxq(yz_idx2, yz_idx2, 32); 6423 6424 if (VM_Version::supports_adx()) { 6425 adcxq(tmp3, carry); 6426 adoxq(tmp3, yz_idx1); 6427 6428 adcxq(tmp4, tmp); 6429 adoxq(tmp4, yz_idx2); 6430 6431 movl(carry, 0); // does not affect flags 6432 adcxq(carry2, carry); 6433 adoxq(carry2, carry); 6434 } else { 6435 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6436 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6437 } 6438 movq(carry, carry2); 6439 6440 movl(Address(z, idx, Address::times_4, 12), tmp3); 6441 shrq(tmp3, 32); 6442 movl(Address(z, idx, Address::times_4, 8), tmp3); 6443 6444 movl(Address(z, idx, Address::times_4, 4), tmp4); 6445 shrq(tmp4, 32); 6446 movl(Address(z, idx, Address::times_4, 0), tmp4); 6447 6448 jmp(L_third_loop); 6449 6450 bind (L_third_loop_exit); 6451 6452 andl (idx, 0x3); 6453 jcc(Assembler::zero, L_post_third_loop_done); 6454 6455 Label L_check_1; 6456 subl(idx, 2); 6457 jcc(Assembler::negative, L_check_1); 6458 6459 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6460 rorxq(yz_idx1, yz_idx1, 32); 6461 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6462 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6463 rorxq(yz_idx2, yz_idx2, 32); 6464 6465 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6466 6467 movl(Address(z, idx, Address::times_4, 4), tmp3); 6468 shrq(tmp3, 32); 6469 movl(Address(z, idx, Address::times_4, 0), tmp3); 6470 movq(carry, tmp4); 6471 6472 bind (L_check_1); 6473 addl (idx, 0x2); 6474 andl (idx, 0x1); 6475 subl(idx, 1); 6476 jcc(Assembler::negative, L_post_third_loop_done); 6477 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6478 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6479 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6480 6481 add2_with_carry(carry2, tmp3, tmp4, carry); 6482 6483 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 6484 shrq(tmp3, 32); 6485 6486 shlq(carry2, 32); 6487 orq(tmp3, carry2); 6488 movq(carry, tmp3); 6489 6490 bind(L_post_third_loop_done); 6491 } 6492 6493 /** 6494 * Code for BigInteger::multiplyToLen() intrinsic. 6495 * 6496 * rdi: x 6497 * rax: xlen 6498 * rsi: y 6499 * rcx: ylen 6500 * r8: z 6501 * r11: tmp0 6502 * r12: tmp1 6503 * r13: tmp2 6504 * r14: tmp3 6505 * r15: tmp4 6506 * rbx: tmp5 6507 * 6508 */ 6509 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 6510 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 6511 ShortBranchVerifier sbv(this); 6512 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 6513 6514 push(tmp0); 6515 push(tmp1); 6516 push(tmp2); 6517 push(tmp3); 6518 push(tmp4); 6519 push(tmp5); 6520 6521 push(xlen); 6522 6523 const Register idx = tmp1; 6524 const Register kdx = tmp2; 6525 const Register xstart = tmp3; 6526 6527 const Register y_idx = tmp4; 6528 const Register carry = tmp5; 6529 const Register product = xlen; 6530 const Register x_xstart = tmp0; 6531 6532 // First Loop. 6533 // 6534 // final static long LONG_MASK = 0xffffffffL; 6535 // int xstart = xlen - 1; 6536 // int ystart = ylen - 1; 6537 // long carry = 0; 6538 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6539 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6540 // z[kdx] = (int)product; 6541 // carry = product >>> 32; 6542 // } 6543 // z[xstart] = (int)carry; 6544 // 6545 6546 movl(idx, ylen); // idx = ylen; 6547 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen; 6548 xorq(carry, carry); // carry = 0; 6549 6550 Label L_done; 6551 6552 movl(xstart, xlen); 6553 decrementl(xstart); 6554 jcc(Assembler::negative, L_done); 6555 6556 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6557 6558 Label L_second_loop; 6559 testl(kdx, kdx); 6560 jcc(Assembler::zero, L_second_loop); 6561 6562 Label L_carry; 6563 subl(kdx, 1); 6564 jcc(Assembler::zero, L_carry); 6565 6566 movl(Address(z, kdx, Address::times_4, 0), carry); 6567 shrq(carry, 32); 6568 subl(kdx, 1); 6569 6570 bind(L_carry); 6571 movl(Address(z, kdx, Address::times_4, 0), carry); 6572 6573 // Second and third (nested) loops. 
6574 // 6575 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6576 // carry = 0; 6577 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6578 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6579 // (z[k] & LONG_MASK) + carry; 6580 // z[k] = (int)product; 6581 // carry = product >>> 32; 6582 // } 6583 // z[i] = (int)carry; 6584 // } 6585 // 6586 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 6587 6588 const Register jdx = tmp1; 6589 6590 bind(L_second_loop); 6591 xorl(carry, carry); // carry = 0; 6592 movl(jdx, ylen); // j = ystart+1 6593 6594 subl(xstart, 1); // i = xstart-1; 6595 jcc(Assembler::negative, L_done); 6596 6597 push (z); 6598 6599 Label L_last_x; 6600 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 6601 subl(xstart, 1); // i = xstart-1; 6602 jcc(Assembler::negative, L_last_x); 6603 6604 if (UseBMI2Instructions) { 6605 movq(rdx, Address(x, xstart, Address::times_4, 0)); 6606 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 6607 } else { 6608 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6609 rorq(x_xstart, 32); // convert big-endian to little-endian 6610 } 6611 6612 Label L_third_loop_prologue; 6613 bind(L_third_loop_prologue); 6614 6615 push (x); 6616 push (xstart); 6617 push (ylen); 6618 6619 6620 if (UseBMI2Instructions) { 6621 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 6622 } else { // !UseBMI2Instructions 6623 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6624 } 6625 6626 pop(ylen); 6627 pop(xlen); 6628 pop(x); 6629 pop(z); 6630 6631 movl(tmp3, xlen); 6632 addl(tmp3, 1); 6633 movl(Address(z, tmp3, Address::times_4, 0), carry); 6634 subl(tmp3, 1); 6635 jccb(Assembler::negative, L_done); 6636 6637 shrq(carry, 32); 6638 movl(Address(z, tmp3, Address::times_4, 0), carry); 6639 jmp(L_second_loop); 6640 6641 // Next infrequent code is moved outside loops. 6642 bind(L_last_x); 6643 if (UseBMI2Instructions) { 6644 movl(rdx, Address(x, 0)); 6645 } else { 6646 movl(x_xstart, Address(x, 0)); 6647 } 6648 jmp(L_third_loop_prologue); 6649 6650 bind(L_done); 6651 6652 pop(xlen); 6653 6654 pop(tmp5); 6655 pop(tmp4); 6656 pop(tmp3); 6657 pop(tmp2); 6658 pop(tmp1); 6659 pop(tmp0); 6660 } 6661 6662 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 6663 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 6664 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 6665 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 6666 Label VECTOR8_TAIL, VECTOR4_TAIL; 6667 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 6668 Label SAME_TILL_END, DONE; 6669 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 6670 6671 //scale is in rcx in both Win64 and Unix 6672 ShortBranchVerifier sbv(this); 6673 6674 shlq(length); 6675 xorq(result, result); 6676 6677 if ((AVX3Threshold == 0) && (UseAVX > 2) && 6678 VM_Version::supports_avx512vlbw()) { 6679 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 6680 6681 cmpq(length, 64); 6682 jcc(Assembler::less, VECTOR32_TAIL); 6683 6684 movq(tmp1, length); 6685 andq(tmp1, 0x3F); // tail count 6686 andq(length, ~(0x3F)); //vector count 6687 6688 bind(VECTOR64_LOOP); 6689 // AVX512 code to compare 64 byte vectors. 
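// Load 64 bytes from obja and compare them byte-wise against objb; evpcmpeqb
// sets a bit in k7 for every lane that matches, and kortestql sets CF only when
// all 64 lanes matched, so a clear carry (aboveEqual) below means a mismatch.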
6690 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 6691 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 6692 kortestql(k7, k7); 6693 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 6694 addq(result, 64); 6695 subq(length, 64); 6696 jccb(Assembler::notZero, VECTOR64_LOOP); 6697 6698 //bind(VECTOR64_TAIL); 6699 testq(tmp1, tmp1); 6700 jcc(Assembler::zero, SAME_TILL_END); 6701 6702 //bind(VECTOR64_TAIL); 6703 // AVX512 code to compare up to 63 byte vectors. 6704 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 6705 shlxq(tmp2, tmp2, tmp1); 6706 notq(tmp2); 6707 kmovql(k3, tmp2); 6708 6709 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 6710 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 6711 6712 ktestql(k7, k3); 6713 jcc(Assembler::below, SAME_TILL_END); // not mismatch 6714 6715 bind(VECTOR64_NOT_EQUAL); 6716 kmovql(tmp1, k7); 6717 notq(tmp1); 6718 tzcntq(tmp1, tmp1); 6719 addq(result, tmp1); 6720 shrq(result); 6721 jmp(DONE); 6722 bind(VECTOR32_TAIL); 6723 } 6724 6725 cmpq(length, 8); 6726 jcc(Assembler::equal, VECTOR8_LOOP); 6727 jcc(Assembler::less, VECTOR4_TAIL); 6728 6729 if (UseAVX >= 2) { 6730 Label VECTOR16_TAIL, VECTOR32_LOOP; 6731 6732 cmpq(length, 16); 6733 jcc(Assembler::equal, VECTOR16_LOOP); 6734 jcc(Assembler::less, VECTOR8_LOOP); 6735 6736 cmpq(length, 32); 6737 jccb(Assembler::less, VECTOR16_TAIL); 6738 6739 subq(length, 32); 6740 bind(VECTOR32_LOOP); 6741 vmovdqu(rymm0, Address(obja, result)); 6742 vmovdqu(rymm1, Address(objb, result)); 6743 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 6744 vptest(rymm2, rymm2); 6745 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 6746 addq(result, 32); 6747 subq(length, 32); 6748 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 6749 addq(length, 32); 6750 jcc(Assembler::equal, SAME_TILL_END); 6751 //falling through if less than 32 bytes left //close the branch here. 
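// Fewer than 32 bytes remain once execution falls through to this point; the
// code below drops to progressively narrower tails (16, 8 and 4 bytes, then
// single bytes), repeating the same load/xor/test pattern at each width.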
6752 6753 bind(VECTOR16_TAIL); 6754 cmpq(length, 16); 6755 jccb(Assembler::less, VECTOR8_TAIL); 6756 bind(VECTOR16_LOOP); 6757 movdqu(rymm0, Address(obja, result)); 6758 movdqu(rymm1, Address(objb, result)); 6759 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 6760 ptest(rymm2, rymm2); 6761 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6762 addq(result, 16); 6763 subq(length, 16); 6764 jcc(Assembler::equal, SAME_TILL_END); 6765 //falling through if less than 16 bytes left 6766 } else {//regular intrinsics 6767 6768 cmpq(length, 16); 6769 jccb(Assembler::less, VECTOR8_TAIL); 6770 6771 subq(length, 16); 6772 bind(VECTOR16_LOOP); 6773 movdqu(rymm0, Address(obja, result)); 6774 movdqu(rymm1, Address(objb, result)); 6775 pxor(rymm0, rymm1); 6776 ptest(rymm0, rymm0); 6777 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6778 addq(result, 16); 6779 subq(length, 16); 6780 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 6781 addq(length, 16); 6782 jcc(Assembler::equal, SAME_TILL_END); 6783 //falling through if less than 16 bytes left 6784 } 6785 6786 bind(VECTOR8_TAIL); 6787 cmpq(length, 8); 6788 jccb(Assembler::less, VECTOR4_TAIL); 6789 bind(VECTOR8_LOOP); 6790 movq(tmp1, Address(obja, result)); 6791 movq(tmp2, Address(objb, result)); 6792 xorq(tmp1, tmp2); 6793 testq(tmp1, tmp1); 6794 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 6795 addq(result, 8); 6796 subq(length, 8); 6797 jcc(Assembler::equal, SAME_TILL_END); 6798 //falling through if less than 8 bytes left 6799 6800 bind(VECTOR4_TAIL); 6801 cmpq(length, 4); 6802 jccb(Assembler::less, BYTES_TAIL); 6803 bind(VECTOR4_LOOP); 6804 movl(tmp1, Address(obja, result)); 6805 xorl(tmp1, Address(objb, result)); 6806 testl(tmp1, tmp1); 6807 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 6808 addq(result, 4); 6809 subq(length, 4); 6810 jcc(Assembler::equal, SAME_TILL_END); 6811 //falling through if less than 4 bytes left 6812 6813 bind(BYTES_TAIL); 6814 bind(BYTES_LOOP); 6815 load_unsigned_byte(tmp1, Address(obja, result)); 6816 load_unsigned_byte(tmp2, Address(objb, result)); 6817 xorl(tmp1, tmp2); 6818 testl(tmp1, tmp1); 6819 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6820 decq(length); 6821 jcc(Assembler::zero, SAME_TILL_END); 6822 incq(result); 6823 load_unsigned_byte(tmp1, Address(obja, result)); 6824 load_unsigned_byte(tmp2, Address(objb, result)); 6825 xorl(tmp1, tmp2); 6826 testl(tmp1, tmp1); 6827 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6828 decq(length); 6829 jcc(Assembler::zero, SAME_TILL_END); 6830 incq(result); 6831 load_unsigned_byte(tmp1, Address(obja, result)); 6832 load_unsigned_byte(tmp2, Address(objb, result)); 6833 xorl(tmp1, tmp2); 6834 testl(tmp1, tmp1); 6835 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6836 jmp(SAME_TILL_END); 6837 6838 if (UseAVX >= 2) { 6839 bind(VECTOR32_NOT_EQUAL); 6840 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 6841 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 6842 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 6843 vpmovmskb(tmp1, rymm0); 6844 bsfq(tmp1, tmp1); 6845 addq(result, tmp1); 6846 shrq(result); 6847 jmp(DONE); 6848 } 6849 6850 bind(VECTOR16_NOT_EQUAL); 6851 if (UseAVX >= 2) { 6852 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 6853 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 6854 pxor(rymm0, rymm2); 6855 } else { 6856 pcmpeqb(rymm2, rymm2); 6857 pxor(rymm0, rymm1); 6858 pcmpeqb(rymm0, rymm1); 6859 pxor(rymm0, rymm2); 6860 } 6861 pmovmskb(tmp1, rymm0); 6862 
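// At this point tmp1 holds one bit per byte position that differed within the
// 16-byte chunk; bsf below picks the first such position, it is added to the
// running byte offset in result, and shrq (shift count in cl, the array index
// scale) converts that byte offset into an element index.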
bsfq(tmp1, tmp1); 6863 addq(result, tmp1); 6864 shrq(result); 6865 jmpb(DONE); 6866 6867 bind(VECTOR8_NOT_EQUAL); 6868 bind(VECTOR4_NOT_EQUAL); 6869 bsfq(tmp1, tmp1); 6870 shrq(tmp1, 3); 6871 addq(result, tmp1); 6872 bind(BYTES_NOT_EQUAL); 6873 shrq(result); 6874 jmpb(DONE); 6875 6876 bind(SAME_TILL_END); 6877 mov64(result, -1); 6878 6879 bind(DONE); 6880 } 6881 6882 //Helper functions for square_to_len() 6883 6884 /** 6885 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 6886 * Preserves x and z and modifies rest of the registers. 6887 */ 6888 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 6889 // Perform square and right shift by 1 6890 // Handle odd xlen case first, then for even xlen do the following 6891 // jlong carry = 0; 6892 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 6893 // huge_128 product = x[j:j+1] * x[j:j+1]; 6894 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 6895 // z[i+2:i+3] = (jlong)(product >>> 1); 6896 // carry = (jlong)product; 6897 // } 6898 6899 xorq(tmp5, tmp5); // carry 6900 xorq(rdxReg, rdxReg); 6901 xorl(tmp1, tmp1); // index for x 6902 xorl(tmp4, tmp4); // index for z 6903 6904 Label L_first_loop, L_first_loop_exit; 6905 6906 testl(xlen, 1); 6907 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 6908 6909 // Square and right shift by 1 the odd element using 32 bit multiply 6910 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 6911 imulq(raxReg, raxReg); 6912 shrq(raxReg, 1); 6913 adcq(tmp5, 0); 6914 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 6915 incrementl(tmp1); 6916 addl(tmp4, 2); 6917 6918 // Square and right shift by 1 the rest using 64 bit multiply 6919 bind(L_first_loop); 6920 cmpptr(tmp1, xlen); 6921 jccb(Assembler::equal, L_first_loop_exit); 6922 6923 // Square 6924 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 6925 rorq(raxReg, 32); // convert big-endian to little-endian 6926 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 6927 6928 // Right shift by 1 and save carry 6929 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 6930 rcrq(rdxReg, 1); 6931 rcrq(raxReg, 1); 6932 adcq(tmp5, 0); 6933 6934 // Store result in z 6935 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 6936 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 6937 6938 // Update indices for x and z 6939 addl(tmp1, 2); 6940 addl(tmp4, 4); 6941 jmp(L_first_loop); 6942 6943 bind(L_first_loop_exit); 6944 } 6945 6946 6947 /** 6948 * Perform the following multiply add operation using BMI2 instructions 6949 * carry:sum = sum + op1*op2 + carry 6950 * op2 should be in rdx 6951 * op2 is preserved, all other registers are modified 6952 */ 6953 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 6954 // assert op2 is rdx 6955 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 6956 addq(sum, carry); 6957 adcq(tmp2, 0); 6958 addq(sum, op1); 6959 adcq(tmp2, 0); 6960 movq(carry, tmp2); 6961 } 6962 6963 /** 6964 * Perform the following multiply add operation: 6965 * carry:sum = sum + op1*op2 + carry 6966 * Preserves op1, op2 and modifies rest of registers 6967 */ 6968 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 6969 // rdx:rax = op1 * op2 6970 movq(raxReg, op2); 6971 mulq(op1); 6972 6973 // rdx:rax = sum + carry + rdx:rax 6974 addq(sum, carry); 6975 
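// rdx:rax holds op1 * op2 here; the adc sequence below folds the carries from
// (sum + carry) and (sum + low half of the product) into rdx, leaving
// rdx:sum == sum + op1*op2 + carry, with rdx copied out as the new carry.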
adcq(rdxReg, 0); 6976 addq(sum, raxReg); 6977 adcq(rdxReg, 0); 6978 6979 // carry:sum = rdx:sum 6980 movq(carry, rdxReg); 6981 } 6982 6983 /** 6984 * Add 64 bit long carry into z[] with carry propagation. 6985 * Preserves z and carry register values and modifies rest of registers. 6986 * 6987 */ 6988 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 6989 Label L_fourth_loop, L_fourth_loop_exit; 6990 6991 movl(tmp1, 1); 6992 subl(zlen, 2); 6993 addq(Address(z, zlen, Address::times_4, 0), carry); 6994 6995 bind(L_fourth_loop); 6996 jccb(Assembler::carryClear, L_fourth_loop_exit); 6997 subl(zlen, 2); 6998 jccb(Assembler::negative, L_fourth_loop_exit); 6999 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7000 jmp(L_fourth_loop); 7001 bind(L_fourth_loop_exit); 7002 } 7003 7004 /** 7005 * Shift z[] left by 1 bit. 7006 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7007 * 7008 */ 7009 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7010 7011 Label L_fifth_loop, L_fifth_loop_exit; 7012 7013 // Fifth loop 7014 // Perform primitiveLeftShift(z, zlen, 1) 7015 7016 const Register prev_carry = tmp1; 7017 const Register new_carry = tmp4; 7018 const Register value = tmp2; 7019 const Register zidx = tmp3; 7020 7021 // int zidx, carry; 7022 // long value; 7023 // carry = 0; 7024 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7025 // (carry:value) = (z[i] << 1) | carry ; 7026 // z[i] = value; 7027 // } 7028 7029 movl(zidx, zlen); 7030 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7031 7032 bind(L_fifth_loop); 7033 decl(zidx); // Use decl to preserve carry flag 7034 decl(zidx); 7035 jccb(Assembler::negative, L_fifth_loop_exit); 7036 7037 if (UseBMI2Instructions) { 7038 movq(value, Address(z, zidx, Address::times_4, 0)); 7039 rclq(value, 1); 7040 rorxq(value, value, 32); 7041 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7042 } 7043 else { 7044 // clear new_carry 7045 xorl(new_carry, new_carry); 7046 7047 // Shift z[i] by 1, or in previous carry and save new carry 7048 movq(value, Address(z, zidx, Address::times_4, 0)); 7049 shlq(value, 1); 7050 adcl(new_carry, 0); 7051 7052 orq(value, prev_carry); 7053 rorq(value, 0x20); 7054 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7055 7056 // Set previous carry = new carry 7057 movl(prev_carry, new_carry); 7058 } 7059 jmp(L_fifth_loop); 7060 7061 bind(L_fifth_loop_exit); 7062 } 7063 7064 7065 /** 7066 * Code for BigInteger::squareToLen() intrinsic 7067 * 7068 * rdi: x 7069 * rsi: len 7070 * r8: z 7071 * rcx: zlen 7072 * r12: tmp1 7073 * r13: tmp2 7074 * r14: tmp3 7075 * r15: tmp4 7076 * rbx: tmp5 7077 * 7078 */ 7079 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7080 7081 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7082 push(tmp1); 7083 push(tmp2); 7084 push(tmp3); 7085 push(tmp4); 7086 push(tmp5); 7087 7088 // First loop 7089 // Store the squares, right shifted one bit (i.e., divided by 2). 7090 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7091 7092 // Add in off-diagonal sums. 7093 // 7094 // Second, third (nested) and fourth loops. 
7095 // zlen +=2; 7096 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7097 // carry = 0; 7098 // long op2 = x[xidx:xidx+1]; 7099 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 7100 // k -= 2; 7101 // long op1 = x[j:j+1]; 7102 // long sum = z[k:k+1]; 7103 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 7104 // z[k:k+1] = sum; 7105 // } 7106 // add_one_64(z, k, carry, tmp_regs); 7107 // } 7108 7109 const Register carry = tmp5; 7110 const Register sum = tmp3; 7111 const Register op1 = tmp4; 7112 Register op2 = tmp2; 7113 7114 push(zlen); 7115 push(len); 7116 addl(zlen,2); 7117 bind(L_second_loop); 7118 xorq(carry, carry); 7119 subl(zlen, 4); 7120 subl(len, 2); 7121 push(zlen); 7122 push(len); 7123 cmpl(len, 0); 7124 jccb(Assembler::lessEqual, L_second_loop_exit); 7125 7126 // Multiply an array by one 64 bit long. 7127 if (UseBMI2Instructions) { 7128 op2 = rdxReg; 7129 movq(op2, Address(x, len, Address::times_4, 0)); 7130 rorxq(op2, op2, 32); 7131 } 7132 else { 7133 movq(op2, Address(x, len, Address::times_4, 0)); 7134 rorq(op2, 32); 7135 } 7136 7137 bind(L_third_loop); 7138 decrementl(len); 7139 jccb(Assembler::negative, L_third_loop_exit); 7140 decrementl(len); 7141 jccb(Assembler::negative, L_last_x); 7142 7143 movq(op1, Address(x, len, Address::times_4, 0)); 7144 rorq(op1, 32); 7145 7146 bind(L_multiply); 7147 subl(zlen, 2); 7148 movq(sum, Address(z, zlen, Address::times_4, 0)); 7149 7150 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 7151 if (UseBMI2Instructions) { 7152 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 7153 } 7154 else { 7155 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7156 } 7157 7158 movq(Address(z, zlen, Address::times_4, 0), sum); 7159 7160 jmp(L_third_loop); 7161 bind(L_third_loop_exit); 7162 7163 // Fourth loop 7164 // Add 64 bit long carry into z with carry propagation. 7165 // Uses offsetted zlen. 7166 add_one_64(z, zlen, carry, tmp1); 7167 7168 pop(len); 7169 pop(zlen); 7170 jmp(L_second_loop); 7171 7172 // Next infrequent code is moved outside loops. 7173 bind(L_last_x); 7174 movl(op1, Address(x, 0)); 7175 jmp(L_multiply); 7176 7177 bind(L_second_loop_exit); 7178 pop(len); 7179 pop(zlen); 7180 pop(len); 7181 pop(zlen); 7182 7183 // Fifth loop 7184 // Shift z left 1 bit. 7185 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 7186 7187 // z[zlen-1] |= x[len-1] & 1; 7188 movl(tmp3, Address(x, len, Address::times_4, -4)); 7189 andl(tmp3, 1); 7190 orl(Address(z, zlen, Address::times_4, -4), tmp3); 7191 7192 pop(tmp5); 7193 pop(tmp4); 7194 pop(tmp3); 7195 pop(tmp2); 7196 pop(tmp1); 7197 } 7198 7199 /** 7200 * Helper function for mul_add() 7201 * Multiply the in[] by int k and add to out[] starting at offset offs using 7202 * 128 bit by 32 bit multiply and return the carry in tmp5. 7203 * Only quad int aligned length of in[] is operated on in this function. 7204 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 7205 * This function preserves out, in and k registers. 7206 * len and offset point to the appropriate index in "in" & "out" correspondingly 7207 * tmp5 has the carry. 7208 * other registers are temporary and are modified. 
7209 * 7210 */ 7211 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7212 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7213 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7214 7215 Label L_first_loop, L_first_loop_exit; 7216 7217 movl(tmp1, len); 7218 shrl(tmp1, 2); 7219 7220 bind(L_first_loop); 7221 subl(tmp1, 1); 7222 jccb(Assembler::negative, L_first_loop_exit); 7223 7224 subl(len, 4); 7225 subl(offset, 4); 7226 7227 Register op2 = tmp2; 7228 const Register sum = tmp3; 7229 const Register op1 = tmp4; 7230 const Register carry = tmp5; 7231 7232 if (UseBMI2Instructions) { 7233 op2 = rdxReg; 7234 } 7235 7236 movq(op1, Address(in, len, Address::times_4, 8)); 7237 rorq(op1, 32); 7238 movq(sum, Address(out, offset, Address::times_4, 8)); 7239 rorq(sum, 32); 7240 if (UseBMI2Instructions) { 7241 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7242 } 7243 else { 7244 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7245 } 7246 // Store back in big endian from little endian 7247 rorq(sum, 0x20); 7248 movq(Address(out, offset, Address::times_4, 8), sum); 7249 7250 movq(op1, Address(in, len, Address::times_4, 0)); 7251 rorq(op1, 32); 7252 movq(sum, Address(out, offset, Address::times_4, 0)); 7253 rorq(sum, 32); 7254 if (UseBMI2Instructions) { 7255 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7256 } 7257 else { 7258 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7259 } 7260 // Store back in big endian from little endian 7261 rorq(sum, 0x20); 7262 movq(Address(out, offset, Address::times_4, 0), sum); 7263 7264 jmp(L_first_loop); 7265 bind(L_first_loop_exit); 7266 } 7267 7268 /** 7269 * Code for BigInteger::mulAdd() intrinsic 7270 * 7271 * rdi: out 7272 * rsi: in 7273 * r11: offs (out.length - offset) 7274 * rcx: len 7275 * r8: k 7276 * r12: tmp1 7277 * r13: tmp2 7278 * r14: tmp3 7279 * r15: tmp4 7280 * rbx: tmp5 7281 * Multiply the in[] by word k and add to out[], return the carry in rax 7282 */ 7283 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7284 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7285 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7286 7287 Label L_carry, L_last_in, L_done; 7288 7289 // carry = 0; 7290 // for (int j=len-1; j >= 0; j--) { 7291 // long product = (in[j] & LONG_MASK) * kLong + 7292 // (out[offs] & LONG_MASK) + carry; 7293 // out[offs--] = (int)product; 7294 // carry = product >>> 32; 7295 // } 7296 // 7297 push(tmp1); 7298 push(tmp2); 7299 push(tmp3); 7300 push(tmp4); 7301 push(tmp5); 7302 7303 Register op2 = tmp2; 7304 const Register sum = tmp3; 7305 const Register op1 = tmp4; 7306 const Register carry = tmp5; 7307 7308 if (UseBMI2Instructions) { 7309 op2 = rdxReg; 7310 movl(op2, k); 7311 } 7312 else { 7313 movl(op2, k); 7314 } 7315 7316 xorq(carry, carry); 7317 7318 //First loop 7319 7320 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7321 //The carry is in tmp5 7322 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7323 7324 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7325 decrementl(len); 7326 jccb(Assembler::negative, L_carry); 7327 decrementl(len); 7328 jccb(Assembler::negative, L_last_in); 7329 7330 movq(op1, Address(in, len, Address::times_4, 0)); 7331 rorq(op1, 32); 7332 7333 subl(offs, 2); 7334 movq(sum, Address(out, offs, Address::times_4, 0)); 7335 rorq(sum, 32); 7336 7337 if (UseBMI2Instructions) { 7338 
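// On this path op2 aliases rdx and still holds k (loaded above and preserved by
// mul_add_128_x_32_loop), which is the implicit multiplicand of mulx inside
// multiply_add_64_bmi2.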
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7339 } 7340 else { 7341 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7342 } 7343 7344 // Store back in big endian from little endian 7345 rorq(sum, 0x20); 7346 movq(Address(out, offs, Address::times_4, 0), sum); 7347 7348 testl(len, len); 7349 jccb(Assembler::zero, L_carry); 7350 7351 //Multiply the last in[] entry, if any 7352 bind(L_last_in); 7353 movl(op1, Address(in, 0)); 7354 movl(sum, Address(out, offs, Address::times_4, -4)); 7355 7356 movl(raxReg, k); 7357 mull(op1); //tmp4 * eax -> edx:eax 7358 addl(sum, carry); 7359 adcl(rdxReg, 0); 7360 addl(sum, raxReg); 7361 adcl(rdxReg, 0); 7362 movl(carry, rdxReg); 7363 7364 movl(Address(out, offs, Address::times_4, -4), sum); 7365 7366 bind(L_carry); 7367 //return tmp5/carry as carry in rax 7368 movl(rax, carry); 7369 7370 bind(L_done); 7371 pop(tmp5); 7372 pop(tmp4); 7373 pop(tmp3); 7374 pop(tmp2); 7375 pop(tmp1); 7376 } 7377 7378 /** 7379 * Emits code to update CRC-32 with a byte value according to constants in table 7380 * 7381 * @param [in,out]crc Register containing the crc. 7382 * @param [in]val Register containing the byte to fold into the CRC. 7383 * @param [in]table Register containing the table of crc constants. 7384 * 7385 * uint32_t crc; 7386 * val = crc_table[(val ^ crc) & 0xFF]; 7387 * crc = val ^ (crc >> 8); 7388 * 7389 */ 7390 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7391 xorl(val, crc); 7392 andl(val, 0xFF); 7393 shrl(crc, 8); // unsigned shift 7394 xorl(crc, Address(table, val, Address::times_4, 0)); 7395 } 7396 7397 /** 7398 * Fold 128-bit data chunk 7399 */ 7400 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7401 if (UseAVX > 0) { 7402 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7403 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7404 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7405 pxor(xcrc, xtmp); 7406 } else { 7407 movdqa(xtmp, xcrc); 7408 pclmulhdq(xtmp, xK); // [123:64] 7409 pclmulldq(xcrc, xK); // [63:0] 7410 pxor(xcrc, xtmp); 7411 movdqu(xtmp, Address(buf, offset)); 7412 pxor(xcrc, xtmp); 7413 } 7414 } 7415 7416 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7417 if (UseAVX > 0) { 7418 vpclmulhdq(xtmp, xK, xcrc); 7419 vpclmulldq(xcrc, xK, xcrc); 7420 pxor(xcrc, xbuf); 7421 pxor(xcrc, xtmp); 7422 } else { 7423 movdqa(xtmp, xcrc); 7424 pclmulhdq(xtmp, xK); 7425 pclmulldq(xcrc, xK); 7426 pxor(xcrc, xbuf); 7427 pxor(xcrc, xtmp); 7428 } 7429 } 7430 7431 /** 7432 * 8-bit folds to compute 32-bit CRC 7433 * 7434 * uint64_t xcrc; 7435 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7436 */ 7437 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7438 movdl(tmp, xcrc); 7439 andl(tmp, 0xFF); 7440 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7441 psrldq(xcrc, 1); // unsigned shift one byte 7442 pxor(xcrc, xtmp); 7443 } 7444 7445 /** 7446 * uint32_t crc; 7447 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7448 */ 7449 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7450 movl(tmp, crc); 7451 andl(tmp, 0xFF); 7452 shrl(crc, 8); 7453 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7454 } 7455 7456 /** 7457 * @param crc register containing existing CRC (32-bit) 7458 * @param buf register pointing to input byte buffer (byte*) 7459 * @param len register containing number of bytes 7460 * @param 
table register that will contain address of CRC table 7461 * @param tmp scratch register 7462 */ 7463 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7464 assert_different_registers(crc, buf, len, table, tmp, rax); 7465 7466 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7467 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7468 7469 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7470 // context for the registers used, where all instructions below are using 128-bit mode 7471 // On EVEX without VL and BW, these instructions will all be AVX. 7472 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7473 notl(crc); // ~crc 7474 cmpl(len, 16); 7475 jcc(Assembler::less, L_tail); 7476 7477 // Align buffer to 16 bytes 7478 movl(tmp, buf); 7479 andl(tmp, 0xF); 7480 jccb(Assembler::zero, L_aligned); 7481 subl(tmp, 16); 7482 addl(len, tmp); 7483 7484 align(4); 7485 BIND(L_align_loop); 7486 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7487 update_byte_crc32(crc, rax, table); 7488 increment(buf); 7489 incrementl(tmp); 7490 jccb(Assembler::less, L_align_loop); 7491 7492 BIND(L_aligned); 7493 movl(tmp, len); // save 7494 shrl(len, 4); 7495 jcc(Assembler::zero, L_tail_restore); 7496 7497 // Fold crc into first bytes of vector 7498 movdqa(xmm1, Address(buf, 0)); 7499 movdl(rax, xmm1); 7500 xorl(crc, rax); 7501 if (VM_Version::supports_sse4_1()) { 7502 pinsrd(xmm1, crc, 0); 7503 } else { 7504 pinsrw(xmm1, crc, 0); 7505 shrl(crc, 16); 7506 pinsrw(xmm1, crc, 1); 7507 } 7508 addptr(buf, 16); 7509 subl(len, 4); // len > 0 7510 jcc(Assembler::less, L_fold_tail); 7511 7512 movdqa(xmm2, Address(buf, 0)); 7513 movdqa(xmm3, Address(buf, 16)); 7514 movdqa(xmm4, Address(buf, 32)); 7515 addptr(buf, 48); 7516 subl(len, 3); 7517 jcc(Assembler::lessEqual, L_fold_512b); 7518 7519 // Fold total 512 bits of polynomial on each iteration, 7520 // 128 bits per each of 4 parallel streams. 7521 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 7522 7523 align32(); 7524 BIND(L_fold_512b_loop); 7525 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7526 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 7527 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 7528 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 7529 addptr(buf, 64); 7530 subl(len, 4); 7531 jcc(Assembler::greater, L_fold_512b_loop); 7532 7533 // Fold 512 bits to 128 bits. 7534 BIND(L_fold_512b); 7535 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7536 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 7537 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 7538 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 7539 7540 // Fold the rest of 128 bits data chunks 7541 BIND(L_fold_tail); 7542 addl(len, 3); 7543 jccb(Assembler::lessEqual, L_fold_128b); 7544 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7545 7546 BIND(L_fold_tail_loop); 7547 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7548 addptr(buf, 16); 7549 decrementl(len); 7550 jccb(Assembler::greater, L_fold_tail_loop); 7551 7552 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
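// The carry-less multiplies below reduce the 128-bit remainder against the
// precomputed constants, after which the low 32 bits are folded one byte at a
// time through the CRC table: four folds on xmm0, then four more on the
// general-purpose crc register.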
7553 BIND(L_fold_128b); 7554 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 7555 if (UseAVX > 0) { 7556 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 7557 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 7558 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 7559 } else { 7560 movdqa(xmm2, xmm0); 7561 pclmulqdq(xmm2, xmm1, 0x1); 7562 movdqa(xmm3, xmm0); 7563 pand(xmm3, xmm2); 7564 pclmulqdq(xmm0, xmm3, 0x1); 7565 } 7566 psrldq(xmm1, 8); 7567 psrldq(xmm2, 4); 7568 pxor(xmm0, xmm1); 7569 pxor(xmm0, xmm2); 7570 7571 // 8 8-bit folds to compute 32-bit CRC. 7572 for (int j = 0; j < 4; j++) { 7573 fold_8bit_crc32(xmm0, table, xmm1, rax); 7574 } 7575 movdl(crc, xmm0); // mov 32 bits to general register 7576 for (int j = 0; j < 4; j++) { 7577 fold_8bit_crc32(crc, table, rax); 7578 } 7579 7580 BIND(L_tail_restore); 7581 movl(len, tmp); // restore 7582 BIND(L_tail); 7583 andl(len, 0xf); 7584 jccb(Assembler::zero, L_exit); 7585 7586 // Fold the rest of bytes 7587 align(4); 7588 BIND(L_tail_loop); 7589 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7590 update_byte_crc32(crc, rax, table); 7591 increment(buf); 7592 decrementl(len); 7593 jccb(Assembler::greater, L_tail_loop); 7594 7595 BIND(L_exit); 7596 notl(crc); // ~c 7597 } 7598 7599 // Helper function for AVX 512 CRC32 7600 // Fold 512-bit data chunks 7601 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 7602 Register pos, int offset) { 7603 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 7604 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 7605 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 7606 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 7607 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 7608 } 7609 7610 // Helper function for AVX 512 CRC32 7611 // Compute CRC32 for < 256B buffers 7612 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 7613 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 7614 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 7615 7616 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 7617 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 7618 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 7619 7620 // check if there is enough buffer to be able to fold 16B at a time 7621 cmpl(len, 32); 7622 jcc(Assembler::less, L_less_than_32); 7623 7624 // if there is, load the constants 7625 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 7626 movdl(xmm0, crc); // get the initial crc value 7627 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7628 pxor(xmm7, xmm0); 7629 7630 // update the buffer pointer 7631 addl(pos, 16); 7632 //update the counter.subtract 32 instead of 16 to save one instruction from the loop 7633 subl(len, 32); 7634 jmp(L_16B_reduction_loop); 7635 7636 bind(L_less_than_32); 7637 //mov initial crc to the return value. this is necessary for zero - length buffers. 
7638 movl(rax, crc); 7639 testl(len, len); 7640 jcc(Assembler::equal, L_cleanup); 7641 7642 movdl(xmm0, crc); //get the initial crc value 7643 7644 cmpl(len, 16); 7645 jcc(Assembler::equal, L_exact_16_left); 7646 jcc(Assembler::less, L_less_than_16_left); 7647 7648 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7649 pxor(xmm7, xmm0); //xor the initial crc value 7650 addl(pos, 16); 7651 subl(len, 16); 7652 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 7653 jmp(L_get_last_two_xmms); 7654 7655 bind(L_less_than_16_left); 7656 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 7657 pxor(xmm1, xmm1); 7658 movptr(tmp1, rsp); 7659 movdqu(Address(tmp1, 0 * 16), xmm1); 7660 7661 cmpl(len, 4); 7662 jcc(Assembler::less, L_only_less_than_4); 7663 7664 //backup the counter value 7665 movl(tmp2, len); 7666 cmpl(len, 8); 7667 jcc(Assembler::less, L_less_than_8_left); 7668 7669 //load 8 Bytes 7670 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 7671 movq(Address(tmp1, 0 * 16), rax); 7672 addptr(tmp1, 8); 7673 subl(len, 8); 7674 addl(pos, 8); 7675 7676 bind(L_less_than_8_left); 7677 cmpl(len, 4); 7678 jcc(Assembler::less, L_less_than_4_left); 7679 7680 //load 4 Bytes 7681 movl(rax, Address(buf, pos, Address::times_1, 0)); 7682 movl(Address(tmp1, 0 * 16), rax); 7683 addptr(tmp1, 4); 7684 subl(len, 4); 7685 addl(pos, 4); 7686 7687 bind(L_less_than_4_left); 7688 cmpl(len, 2); 7689 jcc(Assembler::less, L_less_than_2_left); 7690 7691 // load 2 Bytes 7692 movw(rax, Address(buf, pos, Address::times_1, 0)); 7693 movl(Address(tmp1, 0 * 16), rax); 7694 addptr(tmp1, 2); 7695 subl(len, 2); 7696 addl(pos, 2); 7697 7698 bind(L_less_than_2_left); 7699 cmpl(len, 1); 7700 jcc(Assembler::less, L_zero_left); 7701 7702 // load 1 Byte 7703 movb(rax, Address(buf, pos, Address::times_1, 0)); 7704 movb(Address(tmp1, 0 * 16), rax); 7705 7706 bind(L_zero_left); 7707 movdqu(xmm7, Address(rsp, 0)); 7708 pxor(xmm7, xmm0); //xor the initial crc value 7709 7710 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7711 movdqu(xmm0, Address(rax, tmp2)); 7712 pshufb(xmm7, xmm0); 7713 jmp(L_128_done); 7714 7715 bind(L_exact_16_left); 7716 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 7717 pxor(xmm7, xmm0); //xor the initial crc value 7718 jmp(L_128_done); 7719 7720 bind(L_only_less_than_4); 7721 cmpl(len, 3); 7722 jcc(Assembler::less, L_only_less_than_3); 7723 7724 // load 3 Bytes 7725 movb(rax, Address(buf, pos, Address::times_1, 0)); 7726 movb(Address(tmp1, 0), rax); 7727 7728 movb(rax, Address(buf, pos, Address::times_1, 1)); 7729 movb(Address(tmp1, 1), rax); 7730 7731 movb(rax, Address(buf, pos, Address::times_1, 2)); 7732 movb(Address(tmp1, 2), rax); 7733 7734 movdqu(xmm7, Address(rsp, 0)); 7735 pxor(xmm7, xmm0); //xor the initial crc value 7736 7737 pslldq(xmm7, 0x5); 7738 jmp(L_barrett); 7739 bind(L_only_less_than_3); 7740 cmpl(len, 2); 7741 jcc(Assembler::less, L_only_less_than_2); 7742 7743 // load 2 Bytes 7744 movb(rax, Address(buf, pos, Address::times_1, 0)); 7745 movb(Address(tmp1, 0), rax); 7746 7747 movb(rax, Address(buf, pos, Address::times_1, 1)); 7748 movb(Address(tmp1, 1), rax); 7749 7750 movdqu(xmm7, Address(rsp, 0)); 7751 pxor(xmm7, xmm0); //xor the initial crc value 7752 7753 pslldq(xmm7, 0x6); 7754 jmp(L_barrett); 7755 7756 bind(L_only_less_than_2); 7757 //load 1 Byte 7758 movb(rax, Address(buf, pos, Address::times_1, 0)); 7759 movb(Address(tmp1, 0), rax); 7760 7761 movdqu(xmm7, Address(rsp, 
0)); 7762 pxor(xmm7, xmm0); //xor the initial crc value 7763 7764 pslldq(xmm7, 0x7); 7765 } 7766 7767 /** 7768 * Compute CRC32 using AVX512 instructions 7769 * param crc register containing existing CRC (32-bit) 7770 * param buf register pointing to input byte buffer (byte*) 7771 * param len register containing number of bytes 7772 * param table address of crc or crc32c table 7773 * param tmp1 scratch register 7774 * param tmp2 scratch register 7775 * return rax result register 7776 * 7777 * This routine is identical for crc32c with the exception of the precomputed constant 7778 * table which will be passed as the table argument. The calculation steps are 7779 * the same for both variants. 7780 */ 7781 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 7782 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 7783 7784 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7785 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7786 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 7787 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 7788 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 7789 7790 const Register pos = r12; 7791 push(r12); 7792 subptr(rsp, 16 * 2 + 8); 7793 7794 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7795 // context for the registers used, where all instructions below are using 128-bit mode 7796 // On EVEX without VL and BW, these instructions will all be AVX. 7797 movl(pos, 0); 7798 7799 // check if smaller than 256B 7800 cmpl(len, 256); 7801 jcc(Assembler::less, L_less_than_256); 7802 7803 // load the initial crc value 7804 movdl(xmm10, crc); 7805 7806 // receive the initial 64B data, xor the initial crc value 7807 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 7808 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 7809 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 7810 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 7811 7812 subl(len, 256); 7813 cmpl(len, 256); 7814 jcc(Assembler::less, L_fold_128_B_loop); 7815 7816 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 7817 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 7818 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 7819 subl(len, 256); 7820 7821 bind(L_fold_256_B_loop); 7822 addl(pos, 256); 7823 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 7824 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 7825 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 7826 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 7827 7828 subl(len, 256); 7829 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 7830 7831 // Fold 256 into 128 7832 addl(pos, 256); 7833 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 7834 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 7835 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 7836 7837 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 7838 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 7839 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 7840 7841 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 7842 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 7843 7844 addl(len, 128); 7845 jmp(L_fold_128_B_register); 7846 7847 // at this section of the code, there is 128 * x + y(0 <= y<128) bytes of buffer.The fold_128_B_loop 7848 // loop will fold 128B at a time until we have 128 + y Bytes of buffer 7849 7850 // fold 128B at a time.This section of the code folds 8 xmm registers in parallel 7851 bind(L_fold_128_B_loop); 7852 addl(pos, 128); 7853 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 7854 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 7855 7856 subl(len, 128); 7857 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 7858 7859 addl(pos, 128); 7860 7861 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128 7862 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 7863 bind(L_fold_128_B_register); 7864 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 7865 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 7866 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 7867 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 7868 // save last that has no multiplicand 7869 vextracti64x2(xmm7, xmm4, 3); 7870 7871 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 7872 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 7873 // Needed later in reduction loop 7874 movdqu(xmm10, Address(table, 1 * 16)); 7875 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 7876 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 7877 7878 // Swap 1,0,3,2 - 01 00 11 10 7879 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 7880 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 7881 vextracti128(xmm5, xmm8, 1); 7882 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 7883 7884 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 7885 // instead of a cmp instruction, we use the negative flag with the jl instruction 7886 addl(len, 128 - 16); 7887 jcc(Assembler::less, L_final_reduction_for_128); 7888 7889 bind(L_16B_reduction_loop); 7890 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7891 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7892 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7893 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 7894 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7895 addl(pos, 16); 7896 subl(len, 16); 7897 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 7898 7899 bind(L_final_reduction_for_128); 7900 addl(len, 16); 7901 jcc(Assembler::equal, L_128_done); 7902 7903 bind(L_get_last_two_xmms); 7904 movdqu(xmm2, xmm7); 7905 addl(pos, len); 7906 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 7907 subl(pos, len); 7908 7909 // get rid of the extra data that was loaded before 7910 // load the shift constant 7911 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7912 movdqu(xmm0, Address(rax, len)); 7913 addl(rax, len); 7914 7915 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7916 //Change mask to 512 7917 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 7918 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 7919 7920 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 7921 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 7922 vpclmulqdq(xmm7, 
xmm7, xmm10, 0x10); 7923 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 7924 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 7925 7926 bind(L_128_done); 7927 // compute crc of a 128-bit value 7928 movdqu(xmm10, Address(table, 3 * 16)); 7929 movdqu(xmm0, xmm7); 7930 7931 // 64b fold 7932 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 7933 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 7934 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7935 7936 // 32b fold 7937 movdqu(xmm0, xmm7); 7938 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 7939 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 7940 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 7941 jmp(L_barrett); 7942 7943 bind(L_less_than_256); 7944 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 7945 7946 //barrett reduction 7947 bind(L_barrett); 7948 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 7949 movdqu(xmm1, xmm7); 7950 movdqu(xmm2, xmm7); 7951 movdqu(xmm10, Address(table, 4 * 16)); 7952 7953 pclmulqdq(xmm7, xmm10, 0x0); 7954 pxor(xmm7, xmm2); 7955 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 7956 movdqu(xmm2, xmm7); 7957 pclmulqdq(xmm7, xmm10, 0x10); 7958 pxor(xmm7, xmm2); 7959 pxor(xmm7, xmm1); 7960 pextrd(crc, xmm7, 2); 7961 7962 bind(L_cleanup); 7963 addptr(rsp, 16 * 2 + 8); 7964 pop(r12); 7965 } 7966 7967 // S. Gueron / Information Processing Letters 112 (2012) 184 7968 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 7969 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 7970 // Output: the 64-bit carry-less product of B * CONST 7971 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 7972 Register tmp1, Register tmp2, Register tmp3) { 7973 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 7974 if (n > 0) { 7975 addq(tmp3, n * 256 * 8); 7976 } 7977 // Q1 = TABLEExt[n][B & 0xFF]; 7978 movl(tmp1, in); 7979 andl(tmp1, 0x000000FF); 7980 shll(tmp1, 3); 7981 addq(tmp1, tmp3); 7982 movq(tmp1, Address(tmp1, 0)); 7983 7984 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 7985 movl(tmp2, in); 7986 shrl(tmp2, 8); 7987 andl(tmp2, 0x000000FF); 7988 shll(tmp2, 3); 7989 addq(tmp2, tmp3); 7990 movq(tmp2, Address(tmp2, 0)); 7991 7992 shlq(tmp2, 8); 7993 xorq(tmp1, tmp2); 7994 7995 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 7996 movl(tmp2, in); 7997 shrl(tmp2, 16); 7998 andl(tmp2, 0x000000FF); 7999 shll(tmp2, 3); 8000 addq(tmp2, tmp3); 8001 movq(tmp2, Address(tmp2, 0)); 8002 8003 shlq(tmp2, 16); 8004 xorq(tmp1, tmp2); 8005 8006 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8007 shrl(in, 24); 8008 andl(in, 0x000000FF); 8009 shll(in, 3); 8010 addq(in, tmp3); 8011 movq(in, Address(in, 0)); 8012 8013 shlq(in, 24); 8014 xorq(in, tmp1); 8015 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8016 } 8017 8018 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8019 Register in_out, 8020 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8021 XMMRegister w_xtmp2, 8022 Register tmp1, 8023 Register n_tmp2, Register n_tmp3) { 8024 if (is_pclmulqdq_supported) { 8025 movdl(w_xtmp1, in_out); // modified blindly 8026 8027 movl(tmp1, const_or_pre_comp_const_index); 8028 movdl(w_xtmp2, tmp1); 8029 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8030 8031 movdq(in_out, w_xtmp1); 8032 } else { 8033 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8034 } 8035 } 8036 8037 // 
Recombination Alternative 2: No bit-reflections 8038 // T1 = (CRC_A * U1) << 1 8039 // T2 = (CRC_B * U2) << 1 8040 // C1 = T1 >> 32 8041 // C2 = T2 >> 32 8042 // T1 = T1 & 0xFFFFFFFF 8043 // T2 = T2 & 0xFFFFFFFF 8044 // T1 = CRC32(0, T1) 8045 // T2 = CRC32(0, T2) 8046 // C1 = C1 ^ T1 8047 // C2 = C2 ^ T2 8048 // CRC = C1 ^ C2 ^ CRC_C 8049 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8050 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8051 Register tmp1, Register tmp2, 8052 Register n_tmp3) { 8053 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8054 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8055 shlq(in_out, 1); 8056 movl(tmp1, in_out); 8057 shrq(in_out, 32); 8058 xorl(tmp2, tmp2); 8059 crc32(tmp2, tmp1, 4); 8060 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8061 shlq(in1, 1); 8062 movl(tmp1, in1); 8063 shrq(in1, 32); 8064 xorl(tmp2, tmp2); 8065 crc32(tmp2, tmp1, 4); 8066 xorl(in1, tmp2); 8067 xorl(in_out, in1); 8068 xorl(in_out, in2); 8069 } 8070 8071 // Set N to predefined value 8072 // Subtract from a length of a buffer 8073 // execute in a loop: 8074 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8075 // for i = 1 to N do 8076 // CRC_A = CRC32(CRC_A, A[i]) 8077 // CRC_B = CRC32(CRC_B, B[i]) 8078 // CRC_C = CRC32(CRC_C, C[i]) 8079 // end for 8080 // Recombine 8081 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8082 Register in_out1, Register in_out2, Register in_out3, 8083 Register tmp1, Register tmp2, Register tmp3, 8084 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8085 Register tmp4, Register tmp5, 8086 Register n_tmp6) { 8087 Label L_processPartitions; 8088 Label L_processPartition; 8089 Label L_exit; 8090 8091 bind(L_processPartitions); 8092 cmpl(in_out1, 3 * size); 8093 jcc(Assembler::less, L_exit); 8094 xorl(tmp1, tmp1); 8095 xorl(tmp2, tmp2); 8096 movq(tmp3, in_out2); 8097 addq(tmp3, size); 8098 8099 bind(L_processPartition); 8100 crc32(in_out3, Address(in_out2, 0), 8); 8101 crc32(tmp1, Address(in_out2, size), 8); 8102 crc32(tmp2, Address(in_out2, size * 2), 8); 8103 addq(in_out2, 8); 8104 cmpq(in_out2, tmp3); 8105 jcc(Assembler::less, L_processPartition); 8106 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8107 w_xtmp1, w_xtmp2, w_xtmp3, 8108 tmp4, tmp5, 8109 n_tmp6); 8110 addq(in_out2, 2 * size); 8111 subl(in_out1, 3 * size); 8112 jmp(L_processPartitions); 8113 8114 bind(L_exit); 8115 } 8116 8117 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8118 // Input: A buffer I of L bytes. 8119 // Output: the CRC32C value of the buffer. 8120 // Notations: 8121 // Write L = 24N + r, with N = floor (L/24). 8122 // r = L mod 24 (0 <= r < 24). 8123 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 8124 // N quadwords, and R consists of r bytes. 
8125 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 8126 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 8127 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 8128 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 8129 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8130 Register tmp1, Register tmp2, Register tmp3, 8131 Register tmp4, Register tmp5, Register tmp6, 8132 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8133 bool is_pclmulqdq_supported) { 8134 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8135 Label L_wordByWord; 8136 Label L_byteByByteProlog; 8137 Label L_byteByByte; 8138 Label L_exit; 8139 8140 if (is_pclmulqdq_supported ) { 8141 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr(); 8142 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1); 8143 8144 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2); 8145 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3); 8146 8147 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4); 8148 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5); 8149 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8150 } else { 8151 const_or_pre_comp_const_index[0] = 1; 8152 const_or_pre_comp_const_index[1] = 0; 8153 8154 const_or_pre_comp_const_index[2] = 3; 8155 const_or_pre_comp_const_index[3] = 2; 8156 8157 const_or_pre_comp_const_index[4] = 5; 8158 const_or_pre_comp_const_index[5] = 4; 8159 } 8160 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8161 in2, in1, in_out, 8162 tmp1, tmp2, tmp3, 8163 w_xtmp1, w_xtmp2, w_xtmp3, 8164 tmp4, tmp5, 8165 tmp6); 8166 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8167 in2, in1, in_out, 8168 tmp1, tmp2, tmp3, 8169 w_xtmp1, w_xtmp2, w_xtmp3, 8170 tmp4, tmp5, 8171 tmp6); 8172 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8173 in2, in1, in_out, 8174 tmp1, tmp2, tmp3, 8175 w_xtmp1, w_xtmp2, w_xtmp3, 8176 tmp4, tmp5, 8177 tmp6); 8178 movl(tmp1, in2); 8179 andl(tmp1, 0x00000007); 8180 negl(tmp1); 8181 addl(tmp1, in2); 8182 addq(tmp1, in1); 8183 8184 cmpq(in1, tmp1); 8185 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8186 align(16); 8187 BIND(L_wordByWord); 8188 crc32(in_out, Address(in1, 0), 8); 8189 addq(in1, 8); 8190 cmpq(in1, tmp1); 8191 jcc(Assembler::less, L_wordByWord); 8192 8193 BIND(L_byteByByteProlog); 8194 andl(in2, 0x00000007); 8195 movl(tmp2, 1); 8196 8197 cmpl(tmp2, in2); 8198 jccb(Assembler::greater, L_exit); 8199 BIND(L_byteByByte); 8200 crc32(in_out, Address(in1, 0), 1); 8201 incq(in1); 8202 incl(tmp2); 8203 cmpl(tmp2, in2); 8204 jcc(Assembler::lessEqual, L_byteByByte); 8205 8206 BIND(L_exit); 8207 } 8208 #undef BIND 8209 #undef BLOCK_COMMENT 8210 8211 // Compress char[] array to byte[]. 8212 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 8213 // Return the array length if every element in array can be encoded, 8214 // otherwise, the index of first non-latin1 (> 0xff) character. 
8215 // @IntrinsicCandidate 8216 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8217 // for (int i = 0; i < len; i++) { 8218 // char c = src[srcOff]; 8219 // if (c > 0xff) { 8220 // return i; // return index of non-latin1 char 8221 // } 8222 // dst[dstOff] = (byte)c; 8223 // srcOff++; 8224 // dstOff++; 8225 // } 8226 // return len; 8227 // } 8228 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8229 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8230 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8231 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8232 Label copy_chars_loop, done, reset_sp, copy_tail; 8233 8234 // rsi: src 8235 // rdi: dst 8236 // rdx: len 8237 // rcx: tmp5 8238 // rax: result 8239 8240 // rsi holds start addr of source char[] to be compressed 8241 // rdi holds start addr of destination byte[] 8242 // rdx holds length 8243 8244 assert(len != result, ""); 8245 8246 // save length for return 8247 movl(result, len); 8248 8249 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8250 VM_Version::supports_avx512vlbw() && 8251 VM_Version::supports_bmi2()) { 8252 8253 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 8254 8255 // alignment 8256 Label post_alignment; 8257 8258 // if length of the string is less than 32, handle it the old fashioned way 8259 testl(len, -32); 8260 jcc(Assembler::zero, below_threshold); 8261 8262 // First check whether a character is compressible ( <= 0xFF). 8263 // Create mask to test for Unicode chars inside zmm vector 8264 movl(tmp5, 0x00FF); 8265 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 8266 8267 testl(len, -64); 8268 jccb(Assembler::zero, post_alignment); 8269 8270 movl(tmp5, dst); 8271 andl(tmp5, (32 - 1)); 8272 negl(tmp5); 8273 andl(tmp5, (32 - 1)); 8274 8275 // bail out when there is nothing to be done 8276 testl(tmp5, 0xFFFFFFFF); 8277 jccb(Assembler::zero, post_alignment); 8278 8279 // ~(~0 << len), where len is the # of remaining elements to process 8280 movl(len, 0xFFFFFFFF); 8281 shlxl(len, len, tmp5); 8282 notl(len); 8283 kmovdl(mask2, len); 8284 movl(len, result); 8285 8286 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8287 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8288 ktestd(mask1, mask2); 8289 jcc(Assembler::carryClear, copy_tail); 8290 8291 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8292 8293 addptr(src, tmp5); 8294 addptr(src, tmp5); 8295 addptr(dst, tmp5); 8296 subl(len, tmp5); 8297 8298 bind(post_alignment); 8299 // end of alignment 8300 8301 movl(tmp5, len); 8302 andl(tmp5, (32 - 1)); // tail count (in chars) 8303 andl(len, ~(32 - 1)); // vector count (in chars) 8304 jccb(Assembler::zero, copy_loop_tail); 8305 8306 lea(src, Address(src, len, Address::times_2)); 8307 lea(dst, Address(dst, len, Address::times_1)); 8308 negptr(len); 8309 8310 bind(copy_32_loop); 8311 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 8312 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 8313 kortestdl(mask1, mask1); 8314 jccb(Assembler::carryClear, reset_for_copy_tail); 8315 8316 // All elements in current processed chunk are valid candidates for 8317 // compression. Write a truncated byte elements to the memory. 
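// Every one of the 32 chars in this chunk was verified to be <= 0xff above, so
// evpmovwb can truncate each 16-bit element to its low byte and store the 32
// result bytes without losing information.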
8318 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 8319 addptr(len, 32); 8320 jccb(Assembler::notZero, copy_32_loop); 8321 8322 bind(copy_loop_tail); 8323 // bail out when there is nothing to be done 8324 testl(tmp5, 0xFFFFFFFF); 8325 jcc(Assembler::zero, done); 8326 8327 movl(len, tmp5); 8328 8329 // ~(~0 << len), where len is the # of remaining elements to process 8330 movl(tmp5, 0xFFFFFFFF); 8331 shlxl(tmp5, tmp5, len); 8332 notl(tmp5); 8333 8334 kmovdl(mask2, tmp5); 8335 8336 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8337 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8338 ktestd(mask1, mask2); 8339 jcc(Assembler::carryClear, copy_tail); 8340 8341 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8342 jmp(done); 8343 8344 bind(reset_for_copy_tail); 8345 lea(src, Address(src, tmp5, Address::times_2)); 8346 lea(dst, Address(dst, tmp5, Address::times_1)); 8347 subptr(len, tmp5); 8348 jmp(copy_chars_loop); 8349 8350 bind(below_threshold); 8351 } 8352 8353 if (UseSSE42Intrinsics) { 8354 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 8355 8356 // vectored compression 8357 testl(len, 0xfffffff8); 8358 jcc(Assembler::zero, copy_tail); 8359 8360 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 8361 movdl(tmp1Reg, tmp5); 8362 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 8363 8364 andl(len, 0xfffffff0); 8365 jccb(Assembler::zero, copy_16); 8366 8367 // compress 16 chars per iter 8368 pxor(tmp4Reg, tmp4Reg); 8369 8370 lea(src, Address(src, len, Address::times_2)); 8371 lea(dst, Address(dst, len, Address::times_1)); 8372 negptr(len); 8373 8374 bind(copy_32_loop); 8375 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 8376 por(tmp4Reg, tmp2Reg); 8377 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 8378 por(tmp4Reg, tmp3Reg); 8379 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 8380 jccb(Assembler::notZero, reset_for_copy_tail); 8381 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 8382 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 8383 addptr(len, 16); 8384 jccb(Assembler::notZero, copy_32_loop); 8385 8386 // compress next vector of 8 chars (if any) 8387 bind(copy_16); 8388 // len = 0 8389 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 8390 jccb(Assembler::zero, copy_tail_sse); 8391 8392 pxor(tmp3Reg, tmp3Reg); 8393 8394 movdqu(tmp2Reg, Address(src, 0)); 8395 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 8396 jccb(Assembler::notZero, reset_for_copy_tail); 8397 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 8398 movq(Address(dst, 0), tmp2Reg); 8399 addptr(src, 16); 8400 addptr(dst, 8); 8401 jmpb(copy_tail_sse); 8402 8403 bind(reset_for_copy_tail); 8404 movl(tmp5, result); 8405 andl(tmp5, 0x0000000f); 8406 lea(src, Address(src, tmp5, Address::times_2)); 8407 lea(dst, Address(dst, tmp5, Address::times_1)); 8408 subptr(len, tmp5); 8409 jmpb(copy_chars_loop); 8410 8411 bind(copy_tail_sse); 8412 movl(len, result); 8413 andl(len, 0x00000007); // tail count (in chars) 8414 } 8415 // compress 1 char per iter 8416 bind(copy_tail); 8417 testl(len, len); 8418 jccb(Assembler::zero, done); 8419 lea(src, Address(src, len, Address::times_2)); 8420 lea(dst, Address(dst, len, Address::times_1)); 8421 negptr(len); 8422 8423 
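  // The two lea()s and negptr() above bias src/dst to the end of the range and
  // negate len, so the tail loop below needs only one counter: len runs from -n
  // up to zero and serves as both the element index and the termination test.
  // Scalar sketch of the idiom (illustrative only; assumes s/d/n are the raw
  // char*/byte* pointers and the remaining element count):
  //
  //   const jchar* se = s + n;
  //   jbyte*       de = d + n;
  //   ptrdiff_t    i  = -(ptrdiff_t)n;
  //   do {
  //     jchar c = se[i];
  //     if (c > 0xff) break;        // non-latin1: fall through to reset_sp below
  //     de[i] = (jbyte)c;
  //   } while (++i != 0);           // i == 0 means the whole tail was compressed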
bind(copy_chars_loop); 8424 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 8425 testl(tmp5, 0xff00); // check if Unicode char 8426 jccb(Assembler::notZero, reset_sp); 8427 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 8428 increment(len); 8429 jccb(Assembler::notZero, copy_chars_loop); 8430 8431 // add len then return (len will be zero if compress succeeded, otherwise negative) 8432 bind(reset_sp); 8433 addl(result, len); 8434 8435 bind(done); 8436 } 8437 8438 // Inflate byte[] array to char[]. 8439 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 8440 // @IntrinsicCandidate 8441 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 8442 // for (int i = 0; i < len; i++) { 8443 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 8444 // } 8445 // } 8446 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 8447 XMMRegister tmp1, Register tmp2, KRegister mask) { 8448 Label copy_chars_loop, done, below_threshold, avx3_threshold; 8449 // rsi: src 8450 // rdi: dst 8451 // rdx: len 8452 // rcx: tmp2 8453 8454 // rsi holds start addr of source byte[] to be inflated 8455 // rdi holds start addr of destination char[] 8456 // rdx holds length 8457 assert_different_registers(src, dst, len, tmp2); 8458 movl(tmp2, len); 8459 if ((UseAVX > 2) && // AVX512 8460 VM_Version::supports_avx512vlbw() && 8461 VM_Version::supports_bmi2()) { 8462 8463 Label copy_32_loop, copy_tail; 8464 Register tmp3_aliased = len; 8465 8466 // if length of the string is less than 16, handle it in an old fashioned way 8467 testl(len, -16); 8468 jcc(Assembler::zero, below_threshold); 8469 8470 testl(len, -1 * AVX3Threshold); 8471 jcc(Assembler::zero, avx3_threshold); 8472 8473 // In order to use only one arithmetic operation for the main loop we use 8474 // this pre-calculation 8475 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 8476 andl(len, -32); // vector count 8477 jccb(Assembler::zero, copy_tail); 8478 8479 lea(src, Address(src, len, Address::times_1)); 8480 lea(dst, Address(dst, len, Address::times_2)); 8481 negptr(len); 8482 8483 8484 // inflate 32 chars per iter 8485 bind(copy_32_loop); 8486 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 8487 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 8488 addptr(len, 32); 8489 jcc(Assembler::notZero, copy_32_loop); 8490 8491 bind(copy_tail); 8492 // bail out when there is nothing to be done 8493 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 8494 jcc(Assembler::zero, done); 8495 8496 // ~(~0 << length), where length is the # of remaining elements to process 8497 movl(tmp3_aliased, -1); 8498 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 8499 notl(tmp3_aliased); 8500 kmovdl(mask, tmp3_aliased); 8501 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 8502 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 8503 8504 jmp(done); 8505 bind(avx3_threshold); 8506 } 8507 if (UseSSE42Intrinsics) { 8508 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 8509 8510 if (UseAVX > 1) { 8511 andl(tmp2, (16 - 1)); 8512 andl(len, -16); 8513 jccb(Assembler::zero, copy_new_tail); 8514 } else { 8515 andl(tmp2, 0x00000007); // tail count (in chars) 8516 andl(len, 0xfffffff8); // vector count (in chars) 8517 jccb(Assembler::zero, copy_tail); 8518 } 8519 8520 // vectored inflation 8521 lea(src, Address(src, len, 
Address::times_1)); 8522 lea(dst, Address(dst, len, Address::times_2)); 8523 negptr(len); 8524 8525 if (UseAVX > 1) { 8526 bind(copy_16_loop); 8527 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 8528 vmovdqu(Address(dst, len, Address::times_2), tmp1); 8529 addptr(len, 16); 8530 jcc(Assembler::notZero, copy_16_loop); 8531 8532 bind(below_threshold); 8533 bind(copy_new_tail); 8534 movl(len, tmp2); 8535 andl(tmp2, 0x00000007); 8536 andl(len, 0xFFFFFFF8); 8537 jccb(Assembler::zero, copy_tail); 8538 8539 pmovzxbw(tmp1, Address(src, 0)); 8540 movdqu(Address(dst, 0), tmp1); 8541 addptr(src, 8); 8542 addptr(dst, 2 * 8); 8543 8544 jmp(copy_tail, true); 8545 } 8546 8547 // inflate 8 chars per iter 8548 bind(copy_8_loop); 8549 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 8550 movdqu(Address(dst, len, Address::times_2), tmp1); 8551 addptr(len, 8); 8552 jcc(Assembler::notZero, copy_8_loop); 8553 8554 bind(copy_tail); 8555 movl(len, tmp2); 8556 8557 cmpl(len, 4); 8558 jccb(Assembler::less, copy_bytes); 8559 8560 movdl(tmp1, Address(src, 0)); // load 4 byte chars 8561 pmovzxbw(tmp1, tmp1); 8562 movq(Address(dst, 0), tmp1); 8563 subptr(len, 4); 8564 addptr(src, 4); 8565 addptr(dst, 8); 8566 8567 bind(copy_bytes); 8568 } else { 8569 bind(below_threshold); 8570 } 8571 8572 testl(len, len); 8573 jccb(Assembler::zero, done); 8574 lea(src, Address(src, len, Address::times_1)); 8575 lea(dst, Address(dst, len, Address::times_2)); 8576 negptr(len); 8577 8578 // inflate 1 char per iter 8579 bind(copy_chars_loop); 8580 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 8581 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 8582 increment(len); 8583 jcc(Assembler::notZero, copy_chars_loop); 8584 8585 bind(done); 8586 } 8587 8588 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 8589 switch(type) { 8590 case T_BYTE: 8591 case T_BOOLEAN: 8592 evmovdqub(dst, kmask, src, merge, vector_len); 8593 break; 8594 case T_CHAR: 8595 case T_SHORT: 8596 evmovdquw(dst, kmask, src, merge, vector_len); 8597 break; 8598 case T_INT: 8599 case T_FLOAT: 8600 evmovdqul(dst, kmask, src, merge, vector_len); 8601 break; 8602 case T_LONG: 8603 case T_DOUBLE: 8604 evmovdquq(dst, kmask, src, merge, vector_len); 8605 break; 8606 default: 8607 fatal("Unexpected type argument %s", type2name(type)); 8608 break; 8609 } 8610 } 8611 8612 8613 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 8614 switch(type) { 8615 case T_BYTE: 8616 case T_BOOLEAN: 8617 evmovdqub(dst, kmask, src, merge, vector_len); 8618 break; 8619 case T_CHAR: 8620 case T_SHORT: 8621 evmovdquw(dst, kmask, src, merge, vector_len); 8622 break; 8623 case T_INT: 8624 case T_FLOAT: 8625 evmovdqul(dst, kmask, src, merge, vector_len); 8626 break; 8627 case T_LONG: 8628 case T_DOUBLE: 8629 evmovdquq(dst, kmask, src, merge, vector_len); 8630 break; 8631 default: 8632 fatal("Unexpected type argument %s", type2name(type)); 8633 break; 8634 } 8635 } 8636 8637 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 8638 switch(type) { 8639 case T_BYTE: 8640 case T_BOOLEAN: 8641 evmovdqub(dst, kmask, src, merge, vector_len); 8642 break; 8643 case T_CHAR: 8644 case T_SHORT: 8645 evmovdquw(dst, kmask, src, merge, vector_len); 8646 break; 8647 case T_INT: 8648 case 
T_FLOAT: 8649 evmovdqul(dst, kmask, src, merge, vector_len); 8650 break; 8651 case T_LONG: 8652 case T_DOUBLE: 8653 evmovdquq(dst, kmask, src, merge, vector_len); 8654 break; 8655 default: 8656 fatal("Unexpected type argument %s", type2name(type)); 8657 break; 8658 } 8659 } 8660 8661 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 8662 switch(masklen) { 8663 case 2: 8664 knotbl(dst, src); 8665 movl(rtmp, 3); 8666 kmovbl(ktmp, rtmp); 8667 kandbl(dst, ktmp, dst); 8668 break; 8669 case 4: 8670 knotbl(dst, src); 8671 movl(rtmp, 15); 8672 kmovbl(ktmp, rtmp); 8673 kandbl(dst, ktmp, dst); 8674 break; 8675 case 8: 8676 knotbl(dst, src); 8677 break; 8678 case 16: 8679 knotwl(dst, src); 8680 break; 8681 case 32: 8682 knotdl(dst, src); 8683 break; 8684 case 64: 8685 knotql(dst, src); 8686 break; 8687 default: 8688 fatal("Unexpected vector length %d", masklen); 8689 break; 8690 } 8691 } 8692 8693 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8694 switch(type) { 8695 case T_BOOLEAN: 8696 case T_BYTE: 8697 kandbl(dst, src1, src2); 8698 break; 8699 case T_CHAR: 8700 case T_SHORT: 8701 kandwl(dst, src1, src2); 8702 break; 8703 case T_INT: 8704 case T_FLOAT: 8705 kanddl(dst, src1, src2); 8706 break; 8707 case T_LONG: 8708 case T_DOUBLE: 8709 kandql(dst, src1, src2); 8710 break; 8711 default: 8712 fatal("Unexpected type argument %s", type2name(type)); 8713 break; 8714 } 8715 } 8716 8717 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8718 switch(type) { 8719 case T_BOOLEAN: 8720 case T_BYTE: 8721 korbl(dst, src1, src2); 8722 break; 8723 case T_CHAR: 8724 case T_SHORT: 8725 korwl(dst, src1, src2); 8726 break; 8727 case T_INT: 8728 case T_FLOAT: 8729 kordl(dst, src1, src2); 8730 break; 8731 case T_LONG: 8732 case T_DOUBLE: 8733 korql(dst, src1, src2); 8734 break; 8735 default: 8736 fatal("Unexpected type argument %s", type2name(type)); 8737 break; 8738 } 8739 } 8740 8741 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 8742 switch(type) { 8743 case T_BOOLEAN: 8744 case T_BYTE: 8745 kxorbl(dst, src1, src2); 8746 break; 8747 case T_CHAR: 8748 case T_SHORT: 8749 kxorwl(dst, src1, src2); 8750 break; 8751 case T_INT: 8752 case T_FLOAT: 8753 kxordl(dst, src1, src2); 8754 break; 8755 case T_LONG: 8756 case T_DOUBLE: 8757 kxorql(dst, src1, src2); 8758 break; 8759 default: 8760 fatal("Unexpected type argument %s", type2name(type)); 8761 break; 8762 } 8763 } 8764 8765 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8766 switch(type) { 8767 case T_BOOLEAN: 8768 case T_BYTE: 8769 evpermb(dst, mask, nds, src, merge, vector_len); break; 8770 case T_CHAR: 8771 case T_SHORT: 8772 evpermw(dst, mask, nds, src, merge, vector_len); break; 8773 case T_INT: 8774 case T_FLOAT: 8775 evpermd(dst, mask, nds, src, merge, vector_len); break; 8776 case T_LONG: 8777 case T_DOUBLE: 8778 evpermq(dst, mask, nds, src, merge, vector_len); break; 8779 default: 8780 fatal("Unexpected type argument %s", type2name(type)); break; 8781 } 8782 } 8783 8784 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8785 switch(type) { 8786 case T_BOOLEAN: 8787 case T_BYTE: 8788 evpermb(dst, mask, nds, src, merge, vector_len); break; 8789 case T_CHAR: 8790 case T_SHORT: 8791 evpermw(dst, mask, nds, src, 
merge, vector_len); break; 8792 case T_INT: 8793 case T_FLOAT: 8794 evpermd(dst, mask, nds, src, merge, vector_len); break; 8795 case T_LONG: 8796 case T_DOUBLE: 8797 evpermq(dst, mask, nds, src, merge, vector_len); break; 8798 default: 8799 fatal("Unexpected type argument %s", type2name(type)); break; 8800 } 8801 } 8802 8803 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8804 switch(type) { 8805 case T_BYTE: 8806 evpminub(dst, mask, nds, src, merge, vector_len); break; 8807 case T_SHORT: 8808 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8809 case T_INT: 8810 evpminud(dst, mask, nds, src, merge, vector_len); break; 8811 case T_LONG: 8812 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8813 default: 8814 fatal("Unexpected type argument %s", type2name(type)); break; 8815 } 8816 } 8817 8818 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8819 switch(type) { 8820 case T_BYTE: 8821 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8822 case T_SHORT: 8823 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8824 case T_INT: 8825 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8826 case T_LONG: 8827 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8828 default: 8829 fatal("Unexpected type argument %s", type2name(type)); break; 8830 } 8831 } 8832 8833 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8834 switch(type) { 8835 case T_BYTE: 8836 evpminub(dst, mask, nds, src, merge, vector_len); break; 8837 case T_SHORT: 8838 evpminuw(dst, mask, nds, src, merge, vector_len); break; 8839 case T_INT: 8840 evpminud(dst, mask, nds, src, merge, vector_len); break; 8841 case T_LONG: 8842 evpminuq(dst, mask, nds, src, merge, vector_len); break; 8843 default: 8844 fatal("Unexpected type argument %s", type2name(type)); break; 8845 } 8846 } 8847 8848 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8849 switch(type) { 8850 case T_BYTE: 8851 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 8852 case T_SHORT: 8853 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 8854 case T_INT: 8855 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 8856 case T_LONG: 8857 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 8858 default: 8859 fatal("Unexpected type argument %s", type2name(type)); break; 8860 } 8861 } 8862 8863 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8864 switch(type) { 8865 case T_BYTE: 8866 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8867 case T_SHORT: 8868 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8869 case T_INT: 8870 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8871 case T_LONG: 8872 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8873 default: 8874 fatal("Unexpected type argument %s", type2name(type)); break; 8875 } 8876 } 8877 8878 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8879 switch(type) { 8880 case T_BYTE: 8881 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8882 case T_SHORT: 8883 evpmaxsw(dst, mask, nds, src, merge, 
vector_len); break; 8884 case T_INT: 8885 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8886 case T_LONG: 8887 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8888 default: 8889 fatal("Unexpected type argument %s", type2name(type)); break; 8890 } 8891 } 8892 8893 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8894 switch(type) { 8895 case T_BYTE: 8896 evpminsb(dst, mask, nds, src, merge, vector_len); break; 8897 case T_SHORT: 8898 evpminsw(dst, mask, nds, src, merge, vector_len); break; 8899 case T_INT: 8900 evpminsd(dst, mask, nds, src, merge, vector_len); break; 8901 case T_LONG: 8902 evpminsq(dst, mask, nds, src, merge, vector_len); break; 8903 default: 8904 fatal("Unexpected type argument %s", type2name(type)); break; 8905 } 8906 } 8907 8908 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8909 switch(type) { 8910 case T_BYTE: 8911 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 8912 case T_SHORT: 8913 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 8914 case T_INT: 8915 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 8916 case T_LONG: 8917 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 8918 default: 8919 fatal("Unexpected type argument %s", type2name(type)); break; 8920 } 8921 } 8922 8923 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8924 switch(type) { 8925 case T_INT: 8926 evpxord(dst, mask, nds, src, merge, vector_len); break; 8927 case T_LONG: 8928 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8929 default: 8930 fatal("Unexpected type argument %s", type2name(type)); break; 8931 } 8932 } 8933 8934 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8935 switch(type) { 8936 case T_INT: 8937 evpxord(dst, mask, nds, src, merge, vector_len); break; 8938 case T_LONG: 8939 evpxorq(dst, mask, nds, src, merge, vector_len); break; 8940 default: 8941 fatal("Unexpected type argument %s", type2name(type)); break; 8942 } 8943 } 8944 8945 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8946 switch(type) { 8947 case T_INT: 8948 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8949 case T_LONG: 8950 evporq(dst, mask, nds, src, merge, vector_len); break; 8951 default: 8952 fatal("Unexpected type argument %s", type2name(type)); break; 8953 } 8954 } 8955 8956 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8957 switch(type) { 8958 case T_INT: 8959 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 8960 case T_LONG: 8961 evporq(dst, mask, nds, src, merge, vector_len); break; 8962 default: 8963 fatal("Unexpected type argument %s", type2name(type)); break; 8964 } 8965 } 8966 8967 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 8968 switch(type) { 8969 case T_INT: 8970 evpandd(dst, mask, nds, src, merge, vector_len); break; 8971 case T_LONG: 8972 evpandq(dst, mask, nds, src, merge, vector_len); break; 8973 default: 8974 fatal("Unexpected type argument %s", type2name(type)); break; 
8975 } 8976 } 8977 8978 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 8979 switch(type) { 8980 case T_INT: 8981 evpandd(dst, mask, nds, src, merge, vector_len); break; 8982 case T_LONG: 8983 evpandq(dst, mask, nds, src, merge, vector_len); break; 8984 default: 8985 fatal("Unexpected type argument %s", type2name(type)); break; 8986 } 8987 } 8988 8989 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 8990 switch(masklen) { 8991 case 8: 8992 kortestbl(src1, src2); 8993 break; 8994 case 16: 8995 kortestwl(src1, src2); 8996 break; 8997 case 32: 8998 kortestdl(src1, src2); 8999 break; 9000 case 64: 9001 kortestql(src1, src2); 9002 break; 9003 default: 9004 fatal("Unexpected mask length %d", masklen); 9005 break; 9006 } 9007 } 9008 9009 9010 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 9011 switch(masklen) { 9012 case 8: 9013 ktestbl(src1, src2); 9014 break; 9015 case 16: 9016 ktestwl(src1, src2); 9017 break; 9018 case 32: 9019 ktestdl(src1, src2); 9020 break; 9021 case 64: 9022 ktestql(src1, src2); 9023 break; 9024 default: 9025 fatal("Unexpected mask length %d", masklen); 9026 break; 9027 } 9028 } 9029 9030 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9031 switch(type) { 9032 case T_INT: 9033 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9034 case T_LONG: 9035 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9036 default: 9037 fatal("Unexpected type argument %s", type2name(type)); break; 9038 break; 9039 } 9040 } 9041 9042 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9043 switch(type) { 9044 case T_INT: 9045 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9046 case T_LONG: 9047 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9048 default: 9049 fatal("Unexpected type argument %s", type2name(type)); break; 9050 } 9051 } 9052 9053 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9054 switch(type) { 9055 case T_INT: 9056 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9057 case T_LONG: 9058 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9059 default: 9060 fatal("Unexpected type argument %s", type2name(type)); break; 9061 } 9062 } 9063 9064 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9065 switch(type) { 9066 case T_INT: 9067 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9068 case T_LONG: 9069 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9070 default: 9071 fatal("Unexpected type argument %s", type2name(type)); break; 9072 } 9073 } 9074 9075 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9076 assert(rscratch != noreg || always_reachable(src), "missing"); 9077 9078 if (reachable(src)) { 9079 evpandq(dst, nds, as_Address(src), vector_len); 9080 } else { 9081 lea(rscratch, src); 9082 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9083 } 9084 } 9085 9086 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 9087 assert(rscratch != noreg || 
always_reachable(src), "missing"); 9088 9089 if (reachable(src)) { 9090 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 9091 } else { 9092 lea(rscratch, src); 9093 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 9094 } 9095 } 9096 9097 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9098 assert(rscratch != noreg || always_reachable(src), "missing"); 9099 9100 if (reachable(src)) { 9101 evporq(dst, nds, as_Address(src), vector_len); 9102 } else { 9103 lea(rscratch, src); 9104 evporq(dst, nds, Address(rscratch, 0), vector_len); 9105 } 9106 } 9107 9108 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9109 assert(rscratch != noreg || always_reachable(src), "missing"); 9110 9111 if (reachable(src)) { 9112 vpshufb(dst, nds, as_Address(src), vector_len); 9113 } else { 9114 lea(rscratch, src); 9115 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 9116 } 9117 } 9118 9119 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9120 assert(rscratch != noreg || always_reachable(src), "missing"); 9121 9122 if (reachable(src)) { 9123 Assembler::vpor(dst, nds, as_Address(src), vector_len); 9124 } else { 9125 lea(rscratch, src); 9126 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 9127 } 9128 } 9129 9130 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9131 assert(rscratch != noreg || always_reachable(src3), "missing"); 9132 9133 if (reachable(src3)) { 9134 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9135 } else { 9136 lea(rscratch, src3); 9137 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9138 } 9139 } 9140 9141 #if COMPILER2_OR_JVMCI 9142 9143 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9144 Register length, Register temp, int vec_enc) { 9145 // Computing mask for predicated vector store. 9146 movptr(temp, -1); 9147 bzhiq(temp, temp, length); 9148 kmov(mask, temp); 9149 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9150 } 9151 9152 // Set memory operation for length "less than" 64 bytes. 
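// The masked variants below all funnel into fill_masked() above, which builds the
// AVX-512 write mask with BZHI: for a remaining element count n, bzhi(-1, n)
// leaves exactly the low n bits set, so only n lanes are stored. A scalar model
// for the byte case (illustrative only; the helper is hypothetical):
//
//   static void fill_bytes_masked(uint8_t* dst, uint8_t v, uint64_t n) {
//     uint64_t m = (n >= 64) ? ~0ull : ((1ull << n) - 1);   // bzhi(-1, n)
//     for (int lane = 0; lane < 32; lane++) {
//       if (m & (1ull << lane)) dst[lane] = v;              // predicated store lane
//     }
//   }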
9153 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9154 XMMRegister xmm, KRegister mask, Register length, 9155 Register temp, bool use64byteVector) { 9156 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9157 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9158 if (!use64byteVector) { 9159 fill32(dst, disp, xmm); 9160 subptr(length, 32 >> shift); 9161 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9162 } else { 9163 assert(MaxVectorSize == 64, "vector length != 64"); 9164 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9165 } 9166 } 9167 9168 9169 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9170 XMMRegister xmm, KRegister mask, Register length, 9171 Register temp) { 9172 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9173 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9174 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9175 } 9176 9177 9178 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9179 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9180 vmovdqu(dst, xmm); 9181 } 9182 9183 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9184 fill32(Address(dst, disp), xmm); 9185 } 9186 9187 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9188 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9189 if (!use64byteVector) { 9190 fill32(dst, xmm); 9191 fill32(dst.plus_disp(32), xmm); 9192 } else { 9193 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9194 } 9195 } 9196 9197 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9198 fill64(Address(dst, disp), xmm, use64byteVector); 9199 } 9200 9201 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9202 Register count, Register rtmp, XMMRegister xtmp) { 9203 Label L_exit; 9204 Label L_fill_start; 9205 Label L_fill_64_bytes; 9206 Label L_fill_96_bytes; 9207 Label L_fill_128_bytes; 9208 Label L_fill_128_bytes_loop; 9209 Label L_fill_128_loop_header; 9210 Label L_fill_128_bytes_loop_header; 9211 Label L_fill_128_bytes_loop_pre_header; 9212 Label L_fill_zmm_sequence; 9213 9214 int shift = -1; 9215 int avx3threshold = VM_Version::avx3_threshold(); 9216 switch(type) { 9217 case T_BYTE: shift = 0; 9218 break; 9219 case T_SHORT: shift = 1; 9220 break; 9221 case T_INT: shift = 2; 9222 break; 9223 /* Uncomment when LONG fill stubs are supported. 
9224 case T_LONG: shift = 3; 9225 break; 9226 */ 9227 default: 9228 fatal("Unhandled type: %s\n", type2name(type)); 9229 } 9230 9231 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9232 9233 if (MaxVectorSize == 64) { 9234 cmpq(count, avx3threshold >> shift); 9235 jcc(Assembler::greater, L_fill_zmm_sequence); 9236 } 9237 9238 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9239 9240 bind(L_fill_start); 9241 9242 cmpq(count, 32 >> shift); 9243 jccb(Assembler::greater, L_fill_64_bytes); 9244 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9245 jmp(L_exit); 9246 9247 bind(L_fill_64_bytes); 9248 cmpq(count, 64 >> shift); 9249 jccb(Assembler::greater, L_fill_96_bytes); 9250 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9251 jmp(L_exit); 9252 9253 bind(L_fill_96_bytes); 9254 cmpq(count, 96 >> shift); 9255 jccb(Assembler::greater, L_fill_128_bytes); 9256 fill64(to, 0, xtmp); 9257 subq(count, 64 >> shift); 9258 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9259 jmp(L_exit); 9260 9261 bind(L_fill_128_bytes); 9262 cmpq(count, 128 >> shift); 9263 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9264 fill64(to, 0, xtmp); 9265 fill32(to, 64, xtmp); 9266 subq(count, 96 >> shift); 9267 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9268 jmp(L_exit); 9269 9270 bind(L_fill_128_bytes_loop_pre_header); 9271 { 9272 mov(rtmp, to); 9273 andq(rtmp, 31); 9274 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9275 negq(rtmp); 9276 addq(rtmp, 32); 9277 mov64(r8, -1L); 9278 bzhiq(r8, r8, rtmp); 9279 kmovql(k2, r8); 9280 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9281 addq(to, rtmp); 9282 shrq(rtmp, shift); 9283 subq(count, rtmp); 9284 } 9285 9286 cmpq(count, 128 >> shift); 9287 jcc(Assembler::less, L_fill_start); 9288 9289 bind(L_fill_128_bytes_loop_header); 9290 subq(count, 128 >> shift); 9291 9292 align32(); 9293 bind(L_fill_128_bytes_loop); 9294 fill64(to, 0, xtmp); 9295 fill64(to, 64, xtmp); 9296 addq(to, 128); 9297 subq(count, 128 >> shift); 9298 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9299 9300 addq(count, 128 >> shift); 9301 jcc(Assembler::zero, L_exit); 9302 jmp(L_fill_start); 9303 } 9304 9305 if (MaxVectorSize == 64) { 9306 // Sequence using 64 byte ZMM register. 
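    // The pre-header above (and its 64-byte twin below) aligns `to` before
    // entering the unrolled loop: a masked store covers the misaligned head,
    // then the pointer and the element count are adjusted. Illustrative sketch
    // of that computation (plain C++, not stub code):
    //
    //   uintptr_t misalign = (uintptr_t)to & 31;     // bytes past a 32-byte boundary
    //   if (misalign != 0) {
    //     size_t head = 32 - misalign;               // bytes needed to reach alignment
    //     // ... masked store of `head` bytes of the fill value ...
    //     to    += head;
    //     count -= head >> shift;                    // bytes -> elements
    //   }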
9307 Label L_fill_128_bytes_zmm; 9308 Label L_fill_192_bytes_zmm; 9309 Label L_fill_192_bytes_loop_zmm; 9310 Label L_fill_192_bytes_loop_header_zmm; 9311 Label L_fill_192_bytes_loop_pre_header_zmm; 9312 Label L_fill_start_zmm_sequence; 9313 9314 bind(L_fill_zmm_sequence); 9315 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9316 9317 bind(L_fill_start_zmm_sequence); 9318 cmpq(count, 64 >> shift); 9319 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9320 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9321 jmp(L_exit); 9322 9323 bind(L_fill_128_bytes_zmm); 9324 cmpq(count, 128 >> shift); 9325 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9326 fill64(to, 0, xtmp, true); 9327 subq(count, 64 >> shift); 9328 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9329 jmp(L_exit); 9330 9331 bind(L_fill_192_bytes_zmm); 9332 cmpq(count, 192 >> shift); 9333 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9334 fill64(to, 0, xtmp, true); 9335 fill64(to, 64, xtmp, true); 9336 subq(count, 128 >> shift); 9337 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9338 jmp(L_exit); 9339 9340 bind(L_fill_192_bytes_loop_pre_header_zmm); 9341 { 9342 movq(rtmp, to); 9343 andq(rtmp, 63); 9344 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9345 negq(rtmp); 9346 addq(rtmp, 64); 9347 mov64(r8, -1L); 9348 bzhiq(r8, r8, rtmp); 9349 kmovql(k2, r8); 9350 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9351 addq(to, rtmp); 9352 shrq(rtmp, shift); 9353 subq(count, rtmp); 9354 } 9355 9356 cmpq(count, 192 >> shift); 9357 jcc(Assembler::less, L_fill_start_zmm_sequence); 9358 9359 bind(L_fill_192_bytes_loop_header_zmm); 9360 subq(count, 192 >> shift); 9361 9362 align32(); 9363 bind(L_fill_192_bytes_loop_zmm); 9364 fill64(to, 0, xtmp, true); 9365 fill64(to, 64, xtmp, true); 9366 fill64(to, 128, xtmp, true); 9367 addq(to, 192); 9368 subq(count, 192 >> shift); 9369 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 9370 9371 addq(count, 192 >> shift); 9372 jcc(Assembler::zero, L_exit); 9373 jmp(L_fill_start_zmm_sequence); 9374 } 9375 bind(L_exit); 9376 } 9377 #endif //COMPILER2_OR_JVMCI 9378 9379 9380 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 9381 Label done; 9382 cvttss2sil(dst, src); 9383 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9384 cmpl(dst, 0x80000000); // float_sign_flip 9385 jccb(Assembler::notEqual, done); 9386 subptr(rsp, 8); 9387 movflt(Address(rsp, 0), src); 9388 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 9389 pop(dst); 9390 bind(done); 9391 } 9392 9393 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 9394 Label done; 9395 cvttsd2sil(dst, src); 9396 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9397 cmpl(dst, 0x80000000); // float_sign_flip 9398 jccb(Assembler::notEqual, done); 9399 subptr(rsp, 8); 9400 movdbl(Address(rsp, 0), src); 9401 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 9402 pop(dst); 9403 bind(done); 9404 } 9405 9406 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 9407 Label done; 9408 cvttss2siq(dst, src); 9409 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9410 jccb(Assembler::notEqual, done); 9411 subptr(rsp, 8); 9412 movflt(Address(rsp, 0), src); 9413 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup()))); 9414 pop(dst); 9415 
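  // convert_f2i/convert_d2i above, this function, and convert_d2l below all use
  // the same pattern: cvtt* produces the "integer indefinite" value (the sign-flip
  // constant compared against above) for NaN and out-of-range inputs, and only
  // then is the fixup stub called to produce the JLS-specified result. Sketch of
  // that result for float -> long (illustrative only, not the stub's code):
  //
  //   static jlong f2l_jls(float f) {
  //     if (f != f)                 return 0;          // NaN -> 0
  //     if (f >= (float)max_jlong)  return max_jlong;  // clamp positive overflow
  //     if (f <= (float)min_jlong)  return min_jlong;  // clamp negative overflow
  //     return (jlong)f;                               // in range: truncate toward zero
  //   }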
  bind(done);
}

void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(float) algorithm for details.
  const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
  const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
  const int32_t FloatConsts_EXP_BIAS = 127;
  const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
  const int32_t MINUS_32 = 0xFFFFFFE0;
  Label L_special_case, L_block1, L_exit;
  movl(rtmp, FloatConsts_EXP_BIT_MASK);
  movdl(dst, src);
  andl(dst, rtmp);
  sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
  movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
  subl(rtmp, dst);
  movl(rcx, rtmp);
  movl(dst, MINUS_32);
  testl(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movdl(dst, src);
  andl(dst, FloatConsts_SIGNIF_BIT_MASK);
  orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
  movdl(rtmp, src);
  testl(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negl(dst);
  bind(L_block1);
  sarl(dst);
  addl(dst, 0x1);
  sarl(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_f2i(dst, src);
  bind(L_exit);
}

void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
  // The following code is a line-by-line assembly translation of the rounding algorithm.
  // Please refer to java.lang.Math.round(double) algorithm for details.
  const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
  const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
  const int64_t DoubleConsts_EXP_BIAS = 1023;
  const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
  const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
  Label L_special_case, L_block1, L_exit;
  mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
  movq(dst, src);
  andq(dst, rtmp);
  sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
  mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
  subq(rtmp, dst);
  movq(rcx, rtmp);
  mov64(dst, MINUS_64);
  testq(rtmp, dst);
  jccb(Assembler::notEqual, L_special_case);
  movq(dst, src);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
  andq(dst, rtmp);
  mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
  orq(dst, rtmp);
  movq(rtmp, src);
  testq(rtmp, rtmp);
  jccb(Assembler::greaterEqual, L_block1);
  negq(dst);
  bind(L_block1);
  sarq(dst);
  addq(dst, 0x1);
  sarq(dst, 0x1);
  jmp(L_exit);
  bind(L_special_case);
  convert_d2l(dst, src);
  bind(L_exit);
}

void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
  Label done;
  cvttsd2siq(dst, src);
  cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
  jccb(Assembler::notEqual, done);
  subptr(rsp, 8);
  movdbl(Address(rsp, 0), src);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
  pop(dst);
  bind(done);
}

void MacroAssembler::cache_wb(Address line)
{
  // 64-bit CPUs always support clflush
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // prefer clwb (writeback without evict), otherwise
  // prefer clflushopt (potentially parallel writeback with evict),
  // otherwise fall back on clflush (serial writeback with evict)

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // no need for fence when using CLFLUSH
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // pick the correct implementation

  if (!is_pre && (optimized || no_evict)) {
    // need an sfence for post flush when using clflushopt or clwb,
    // otherwise no need for any synchronization

    sfence();
  }
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

// This is simply a call to Thread::current()
void MacroAssembler::get_thread_slow(Register thread) {
  if (thread != rax) {
    push(rax);
  }
  push(rdi);
  push(rsi);
  push(rdx);
  push(rcx);
  push(r8);
  push(r9);
  push(r10);
  push(r11);

  MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0);

  pop(r11);
  pop(r10);
  pop(r9);
  pop(r8);
  pop(rcx);
  pop(rdx);
  pop(rsi);
  pop(rdi);
  if (thread != rax) {
    mov(thread, rax);
    pop(rax);
  }
}

void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) {
  Label L_stack_ok;
  if (bias == 0) {
    testptr(sp, 2 * wordSize - 1);
  } else {
    // lea(tmp, Address(rsp, bias));
    mov(tmp, sp);
    addptr(tmp, bias);
    testptr(tmp, 2 * wordSize - 1);
  }
  jcc(Assembler::equal, L_stack_ok);
  block_comment(msg);
  stop(msg);
  bind(L_stack_ok);
}

// Implements lightweight-locking.
9613 // 9614 // obj: the object to be locked 9615 // reg_rax: rax 9616 // thread: the thread which attempts to lock obj 9617 // tmp: a temporary register 9618 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register tmp, Label& slow) { 9619 Register thread = r15_thread; 9620 9621 assert(reg_rax == rax, ""); 9622 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); 9623 9624 Label push; 9625 const Register top = tmp; 9626 9627 // Preload the markWord. It is important that this is the first 9628 // instruction emitted as it is part of C1's null check semantics. 9629 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 9630 9631 if (UseObjectMonitorTable) { 9632 // Clear cache in case fast locking succeeds or we need to take the slow-path. 9633 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); 9634 } 9635 9636 if (DiagnoseSyncOnValueBasedClasses != 0) { 9637 load_klass(tmp, obj, rscratch1); 9638 testb(Address(tmp, Klass::misc_flags_offset()), KlassFlags::_misc_is_value_based_class); 9639 jcc(Assembler::notZero, slow); 9640 } 9641 9642 // Load top. 9643 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9644 9645 // Check if the lock-stack is full. 9646 cmpl(top, LockStack::end_offset()); 9647 jcc(Assembler::greaterEqual, slow); 9648 9649 // Check for recursion. 9650 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9651 jcc(Assembler::equal, push); 9652 9653 // Check header for monitor (0b10). 9654 testptr(reg_rax, markWord::monitor_value); 9655 jcc(Assembler::notZero, slow); 9656 9657 // Try to lock. Transition lock bits 0b01 => 0b00 9658 movptr(tmp, reg_rax); 9659 andptr(tmp, ~(int32_t)markWord::unlocked_value); 9660 orptr(reg_rax, markWord::unlocked_value); 9661 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9662 jcc(Assembler::notEqual, slow); 9663 9664 // Restore top, CAS clobbers register. 9665 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9666 9667 bind(push); 9668 // After successful lock, push object on lock-stack. 9669 movptr(Address(thread, top), obj); 9670 incrementl(top, oopSize); 9671 movl(Address(thread, JavaThread::lock_stack_top_offset()), top); 9672 } 9673 9674 // Implements lightweight-unlocking. 9675 // 9676 // obj: the object to be unlocked 9677 // reg_rax: rax 9678 // thread: the thread 9679 // tmp: a temporary register 9680 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register tmp, Label& slow) { 9681 Register thread = r15_thread; 9682 9683 assert(reg_rax == rax, ""); 9684 assert_different_registers(obj, reg_rax, thread, tmp); 9685 9686 Label unlocked, push_and_slow; 9687 const Register top = tmp; 9688 9689 // Check if obj is top of lock-stack. 9690 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 9691 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 9692 jcc(Assembler::notEqual, slow); 9693 9694 // Pop lock-stack. 9695 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);) 9696 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9697 9698 // Check if recursive. 9699 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize)); 9700 jcc(Assembler::equal, unlocked); 9701 9702 // Not recursive. Check header for monitor (0b10). 
  movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
  testptr(reg_rax, markWord::monitor_value);
  jcc(Assembler::notZero, push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  testptr(reg_rax, markWord::unlocked_value);
  jcc(Assembler::zero, not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  movptr(tmp, reg_rax);
  orptr(tmp, markWord::unlocked_value);
  lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  jcc(Assembler::equal, unlocked);

  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
#ifdef ASSERT
  movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
  movptr(Address(thread, top), obj);
#endif
  addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
  jmp(slow);

  bind(unlocked);
}

// Saves legacy GPR state on the stack. Sixteen slots are reserved and assigned
// in reverse GPR encoding order (rax at 15 * wordSize down to r15 at 0); the
// slot at 11 * wordSize, where rsp would fall, is left unwritten.
void MacroAssembler::save_legacy_gprs() {
  subq(rsp, 16 * wordSize);
  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// Restores legacy GPR state from the stack.
void MacroAssembler::restore_legacy_gprs() {
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9, Address(rsp, 6 * wordSize));
  movq(r8, Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));
  addq(rsp, 16 * wordSize);
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_dump()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov64(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
  if (VM_Version::supports_apx_f()) {
    esetzucc(comparison, dst);
  } else {
    setb(comparison, dst);
    movzbl(dst, dst);
  }
}
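// Note: without APX, SETcc writes only the low byte of dst, hence the movzbl
// zero-extension above; the APX esetzucc form zero-extends by itself. Either way
// the net effect is dst = (condition holds) ? 1 : 0 across the full register.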