/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static const Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below      = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal      = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual   = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};


//
Implementation of MacroAssembler 93 94 // First all the versions that have distinct versions depending on 32/64 bit 95 // Unless the difference is trivial (1 line or so). 96 97 #ifndef _LP64 98 99 // 32bit versions 100 101 Address MacroAssembler::as_Address(AddressLiteral adr) { 102 return Address(adr.target(), adr.rspec()); 103 } 104 105 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 106 assert(rscratch == noreg, ""); 107 return Address::make_array(adr); 108 } 109 110 void MacroAssembler::call_VM_leaf_base(address entry_point, 111 int number_of_arguments) { 112 call(RuntimeAddress(entry_point)); 113 increment(rsp, number_of_arguments * wordSize); 114 } 115 116 void MacroAssembler::cmpklass(Address src1, Metadata* obj) { 117 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 118 } 119 120 121 void MacroAssembler::cmpklass(Register src1, Metadata* obj) { 122 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 123 } 124 125 void MacroAssembler::cmpoop(Address src1, jobject obj) { 126 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); 127 } 128 129 void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) { 130 assert(rscratch == noreg, "redundant"); 131 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); 132 } 133 134 void MacroAssembler::extend_sign(Register hi, Register lo) { 135 // According to Intel Doc. AP-526, "Integer Divide", p.18. 136 if (VM_Version::is_P6() && hi == rdx && lo == rax) { 137 cdql(); 138 } else { 139 movl(hi, lo); 140 sarl(hi, 31); 141 } 142 } 143 144 void MacroAssembler::jC2(Register tmp, Label& L) { 145 // set parity bit if FPU flag C2 is set (via rax) 146 save_rax(tmp); 147 fwait(); fnstsw_ax(); 148 sahf(); 149 restore_rax(tmp); 150 // branch 151 jcc(Assembler::parity, L); 152 } 153 154 void MacroAssembler::jnC2(Register tmp, Label& L) { 155 // set parity bit if FPU flag C2 is set (via rax) 156 save_rax(tmp); 157 fwait(); fnstsw_ax(); 158 sahf(); 159 restore_rax(tmp); 160 // branch 161 jcc(Assembler::noParity, L); 162 } 163 164 // 32bit can do a case table jump in one instruction but we no longer allow the base 165 // to be installed in the Address class 166 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 167 assert(rscratch == noreg, "not needed"); 168 jmp(as_Address(entry, noreg)); 169 } 170 171 // Note: y_lo will be destroyed 172 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 173 // Long compare for Java (semantics as described in JVM spec.) 
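  // The result is produced in x_hi: -1 if x < y, 0 if x == y, +1 if x > y
  // (the lcmp convention). The high words are compared as signed values;
  // when they are equal, the low words are compared as unsigned values
  // (Assembler::below), since they are the low 32 bits of the 64-bit operands.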
174 Label high, low, done; 175 176 cmpl(x_hi, y_hi); 177 jcc(Assembler::less, low); 178 jcc(Assembler::greater, high); 179 // x_hi is the return register 180 xorl(x_hi, x_hi); 181 cmpl(x_lo, y_lo); 182 jcc(Assembler::below, low); 183 jcc(Assembler::equal, done); 184 185 bind(high); 186 xorl(x_hi, x_hi); 187 increment(x_hi); 188 jmp(done); 189 190 bind(low); 191 xorl(x_hi, x_hi); 192 decrementl(x_hi); 193 194 bind(done); 195 } 196 197 void MacroAssembler::lea(Register dst, AddressLiteral src) { 198 mov_literal32(dst, (int32_t)src.target(), src.rspec()); 199 } 200 201 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 202 assert(rscratch == noreg, "not needed"); 203 204 // leal(dst, as_Address(adr)); 205 // see note in movl as to why we must use a move 206 mov_literal32(dst, (int32_t)adr.target(), adr.rspec()); 207 } 208 209 void MacroAssembler::leave() { 210 mov(rsp, rbp); 211 pop(rbp); 212 } 213 214 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) { 215 // Multiplication of two Java long values stored on the stack 216 // as illustrated below. Result is in rdx:rax. 217 // 218 // rsp ---> [ ?? ] \ \ 219 // .... | y_rsp_offset | 220 // [ y_lo ] / (in bytes) | x_rsp_offset 221 // [ y_hi ] | (in bytes) 222 // .... | 223 // [ x_lo ] / 224 // [ x_hi ] 225 // .... 226 // 227 // Basic idea: lo(result) = lo(x_lo * y_lo) 228 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi) 229 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset); 230 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset); 231 Label quick; 232 // load x_hi, y_hi and check if quick 233 // multiplication is possible 234 movl(rbx, x_hi); 235 movl(rcx, y_hi); 236 movl(rax, rbx); 237 orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0 238 jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply 239 // do full multiplication 240 // 1st step 241 mull(y_lo); // x_hi * y_lo 242 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx, 243 // 2nd step 244 movl(rax, x_lo); 245 mull(rcx); // x_lo * y_hi 246 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx, 247 // 3rd step 248 bind(quick); // note: rbx, = 0 if quick multiply! 249 movl(rax, x_lo); 250 mull(y_lo); // x_lo * y_lo 251 addl(rdx, rbx); // correct hi(x_lo * y_lo) 252 } 253 254 void MacroAssembler::lneg(Register hi, Register lo) { 255 negl(lo); 256 adcl(hi, 0); 257 negl(hi); 258 } 259 260 void MacroAssembler::lshl(Register hi, Register lo) { 261 // Java shift left long support (semantics as described in JVM spec., p.305) 262 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n)) 263 // shift value is in rcx ! 264 assert(hi != rcx, "must not use rcx"); 265 assert(lo != rcx, "must not use rcx"); 266 const Register s = rcx; // shift count 267 const int n = BitsPerWord; 268 Label L; 269 andl(s, 0x3f); // s := s & 0x3f (s < 0x40) 270 cmpl(s, n); // if (s < n) 271 jcc(Assembler::less, L); // else (s >= n) 272 movl(hi, lo); // x := x << n 273 xorl(lo, lo); 274 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! 
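  // Illustrative sketch (comment only) of the combined effect for 0 <= s < 64:
  //   if (s < 32) { hi = (hi << s) | (lo >> (32 - s));  lo <<= s; }  // s == 0 leaves both unchanged
  //   else        { hi = lo << (s - 32);                lo = 0;    }
  // The s >= 32 half was handled above; shld/shl below shift by s & 31,
  // which the hardware count-masking provides automatically.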
275 bind(L); // s (mod n) < n 276 shldl(hi, lo); // x := x << s 277 shll(lo); 278 } 279 280 281 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) { 282 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310) 283 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n)) 284 assert(hi != rcx, "must not use rcx"); 285 assert(lo != rcx, "must not use rcx"); 286 const Register s = rcx; // shift count 287 const int n = BitsPerWord; 288 Label L; 289 andl(s, 0x3f); // s := s & 0x3f (s < 0x40) 290 cmpl(s, n); // if (s < n) 291 jcc(Assembler::less, L); // else (s >= n) 292 movl(lo, hi); // x := x >> n 293 if (sign_extension) sarl(hi, 31); 294 else xorl(hi, hi); 295 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! 296 bind(L); // s (mod n) < n 297 shrdl(lo, hi); // x := x >> s 298 if (sign_extension) sarl(hi); 299 else shrl(hi); 300 } 301 302 void MacroAssembler::movoop(Register dst, jobject obj) { 303 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); 304 } 305 306 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 307 assert(rscratch == noreg, "redundant"); 308 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); 309 } 310 311 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 312 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 313 } 314 315 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 316 assert(rscratch == noreg, "redundant"); 317 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 318 } 319 320 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 321 if (src.is_lval()) { 322 mov_literal32(dst, (intptr_t)src.target(), src.rspec()); 323 } else { 324 movl(dst, as_Address(src)); 325 } 326 } 327 328 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 329 assert(rscratch == noreg, "redundant"); 330 movl(as_Address(dst, noreg), src); 331 } 332 333 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 334 movl(dst, as_Address(src, noreg)); 335 } 336 337 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 338 assert(rscratch == noreg, "redundant"); 339 movl(dst, src); 340 } 341 342 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 343 assert(rscratch == noreg, "redundant"); 344 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); 345 } 346 347 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 348 assert(rscratch == noreg, "redundant"); 349 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate()); 350 } 351 352 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 353 assert(rscratch == noreg, "redundant"); 354 if (src.is_lval()) { 355 push_literal32((int32_t)src.target(), src.rspec()); 356 } else { 357 pushl(as_Address(src)); 358 } 359 } 360 361 static void pass_arg0(MacroAssembler* masm, Register arg) { 362 masm->push(arg); 363 } 364 365 static void pass_arg1(MacroAssembler* masm, Register arg) { 366 masm->push(arg); 367 } 368 369 static void pass_arg2(MacroAssembler* masm, Register arg) { 370 masm->push(arg); 371 } 372 373 static void pass_arg3(MacroAssembler* masm, Register arg) { 374 masm->push(arg); 375 } 376 377 #ifndef PRODUCT 378 extern "C" void findpc(intptr_t x); 379 #endif 380 381 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, 
int eip, char* msg) { 382 // In order to get locks to work, we need to fake a in_VM state 383 JavaThread* thread = JavaThread::current(); 384 JavaThreadState saved_state = thread->thread_state(); 385 thread->set_thread_state(_thread_in_vm); 386 if (ShowMessageBoxOnError) { 387 JavaThread* thread = JavaThread::current(); 388 JavaThreadState saved_state = thread->thread_state(); 389 thread->set_thread_state(_thread_in_vm); 390 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 391 ttyLocker ttyl; 392 BytecodeCounter::print(); 393 } 394 // To see where a verify_oop failed, get $ebx+40/X for this frame. 395 // This is the value of eip which points to where verify_oop will return. 396 if (os::message_box(msg, "Execution stopped, print registers?")) { 397 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip); 398 BREAKPOINT; 399 } 400 } 401 fatal("DEBUG MESSAGE: %s", msg); 402 } 403 404 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) { 405 ttyLocker ttyl; 406 DebuggingContext debugging{}; 407 tty->print_cr("eip = 0x%08x", eip); 408 #ifndef PRODUCT 409 if ((WizardMode || Verbose) && PrintMiscellaneous) { 410 tty->cr(); 411 findpc(eip); 412 tty->cr(); 413 } 414 #endif 415 #define PRINT_REG(rax) \ 416 { tty->print("%s = ", #rax); os::print_location(tty, rax); } 417 PRINT_REG(rax); 418 PRINT_REG(rbx); 419 PRINT_REG(rcx); 420 PRINT_REG(rdx); 421 PRINT_REG(rdi); 422 PRINT_REG(rsi); 423 PRINT_REG(rbp); 424 PRINT_REG(rsp); 425 #undef PRINT_REG 426 // Print some words near top of staack. 427 int* dump_sp = (int*) rsp; 428 for (int col1 = 0; col1 < 8; col1++) { 429 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 430 os::print_location(tty, *dump_sp++); 431 } 432 for (int row = 0; row < 16; row++) { 433 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 434 for (int col = 0; col < 8; col++) { 435 tty->print(" 0x%08x", *dump_sp++); 436 } 437 tty->cr(); 438 } 439 // Print some instructions around pc: 440 Disassembler::decode((address)eip-64, (address)eip); 441 tty->print_cr("--------"); 442 Disassembler::decode((address)eip, (address)eip+32); 443 } 444 445 void MacroAssembler::stop(const char* msg) { 446 // push address of message 447 ExternalAddress message((address)msg); 448 pushptr(message.addr(), noreg); 449 { Label L; call(L, relocInfo::none); bind(L); } // push eip 450 pusha(); // push registers 451 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32))); 452 hlt(); 453 } 454 455 void MacroAssembler::warn(const char* msg) { 456 push_CPU_state(); 457 458 // push address of message 459 ExternalAddress message((address)msg); 460 pushptr(message.addr(), noreg); 461 462 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); 463 addl(rsp, wordSize); // discard argument 464 pop_CPU_state(); 465 } 466 467 void MacroAssembler::print_state() { 468 { Label L; call(L, relocInfo::none); bind(L); } // push eip 469 pusha(); // push registers 470 471 push_CPU_state(); 472 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32))); 473 pop_CPU_state(); 474 475 popa(); 476 addl(rsp, wordSize); 477 } 478 479 #else // _LP64 480 481 // 64 bit versions 482 483 Address MacroAssembler::as_Address(AddressLiteral adr) { 484 // amd64 always does this as a pc-rel 485 // we can be absolute or disp based on the instruction type 486 // jmp/call are displacements others are absolute 487 assert(!adr.is_lval(), "must be 
rval"); 488 assert(reachable(adr), "must be"); 489 return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc()); 490 491 } 492 493 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 494 AddressLiteral base = adr.base(); 495 lea(rscratch, base); 496 Address index = adr.index(); 497 assert(index._disp == 0, "must not have disp"); // maybe it can? 498 Address array(rscratch, index._index, index._scale, index._disp); 499 return array; 500 } 501 502 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) { 503 Label L, E; 504 505 #ifdef _WIN64 506 // Windows always allocates space for it's register args 507 assert(num_args <= 4, "only register arguments supported"); 508 subq(rsp, frame::arg_reg_save_area_bytes); 509 #endif 510 511 // Align stack if necessary 512 testl(rsp, 15); 513 jcc(Assembler::zero, L); 514 515 subq(rsp, 8); 516 call(RuntimeAddress(entry_point)); 517 addq(rsp, 8); 518 jmp(E); 519 520 bind(L); 521 call(RuntimeAddress(entry_point)); 522 523 bind(E); 524 525 #ifdef _WIN64 526 // restore stack pointer 527 addq(rsp, frame::arg_reg_save_area_bytes); 528 #endif 529 530 } 531 532 void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) { 533 assert(!src2.is_lval(), "should use cmpptr"); 534 assert(rscratch != noreg || always_reachable(src2), "missing"); 535 536 if (reachable(src2)) { 537 cmpq(src1, as_Address(src2)); 538 } else { 539 lea(rscratch, src2); 540 Assembler::cmpq(src1, Address(rscratch, 0)); 541 } 542 } 543 544 int MacroAssembler::corrected_idivq(Register reg) { 545 // Full implementation of Java ldiv and lrem; checks for special 546 // case as described in JVM spec., p.243 & p.271. The function 547 // returns the (pc) offset of the idivl instruction - may be needed 548 // for implicit exceptions. 
549 // 550 // normal case special case 551 // 552 // input : rax: dividend min_long 553 // reg: divisor (may not be eax/edx) -1 554 // 555 // output: rax: quotient (= rax idiv reg) min_long 556 // rdx: remainder (= rax irem reg) 0 557 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); 558 static const int64_t min_long = 0x8000000000000000; 559 Label normal_case, special_case; 560 561 // check for special case 562 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/); 563 jcc(Assembler::notEqual, normal_case); 564 xorl(rdx, rdx); // prepare rdx for possible special case (where 565 // remainder = 0) 566 cmpq(reg, -1); 567 jcc(Assembler::equal, special_case); 568 569 // handle normal case 570 bind(normal_case); 571 cdqq(); 572 int idivq_offset = offset(); 573 idivq(reg); 574 575 // normal and special case exit 576 bind(special_case); 577 578 return idivq_offset; 579 } 580 581 void MacroAssembler::decrementq(Register reg, int value) { 582 if (value == min_jint) { subq(reg, value); return; } 583 if (value < 0) { incrementq(reg, -value); return; } 584 if (value == 0) { ; return; } 585 if (value == 1 && UseIncDec) { decq(reg) ; return; } 586 /* else */ { subq(reg, value) ; return; } 587 } 588 589 void MacroAssembler::decrementq(Address dst, int value) { 590 if (value == min_jint) { subq(dst, value); return; } 591 if (value < 0) { incrementq(dst, -value); return; } 592 if (value == 0) { ; return; } 593 if (value == 1 && UseIncDec) { decq(dst) ; return; } 594 /* else */ { subq(dst, value) ; return; } 595 } 596 597 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) { 598 assert(rscratch != noreg || always_reachable(dst), "missing"); 599 600 if (reachable(dst)) { 601 incrementq(as_Address(dst)); 602 } else { 603 lea(rscratch, dst); 604 incrementq(Address(rscratch, 0)); 605 } 606 } 607 608 void MacroAssembler::incrementq(Register reg, int value) { 609 if (value == min_jint) { addq(reg, value); return; } 610 if (value < 0) { decrementq(reg, -value); return; } 611 if (value == 0) { ; return; } 612 if (value == 1 && UseIncDec) { incq(reg) ; return; } 613 /* else */ { addq(reg, value) ; return; } 614 } 615 616 void MacroAssembler::incrementq(Address dst, int value) { 617 if (value == min_jint) { addq(dst, value); return; } 618 if (value < 0) { decrementq(dst, -value); return; } 619 if (value == 0) { ; return; } 620 if (value == 1 && UseIncDec) { incq(dst) ; return; } 621 /* else */ { addq(dst, value) ; return; } 622 } 623 624 // 32bit can do a case table jump in one instruction but we no longer allow the base 625 // to be installed in the Address class 626 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 627 lea(rscratch, entry.base()); 628 Address dispatch = entry.index(); 629 assert(dispatch._base == noreg, "must be"); 630 dispatch._base = rscratch; 631 jmp(dispatch); 632 } 633 634 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 635 ShouldNotReachHere(); // 64bit doesn't use two regs 636 cmpq(x_lo, y_lo); 637 } 638 639 void MacroAssembler::lea(Register dst, AddressLiteral src) { 640 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 641 } 642 643 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 644 lea(rscratch, adr); 645 movptr(dst, rscratch); 646 } 647 648 void MacroAssembler::leave() { 649 // %%% is this really better? Why not on 32bit too? 
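  // 0xC9 is the one-byte LEAVE instruction, equivalent to mov rsp, rbp; pop rbp.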
650 emit_int8((unsigned char)0xC9); // LEAVE 651 } 652 653 void MacroAssembler::lneg(Register hi, Register lo) { 654 ShouldNotReachHere(); // 64bit doesn't use two regs 655 negq(lo); 656 } 657 658 void MacroAssembler::movoop(Register dst, jobject obj) { 659 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 660 } 661 662 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 663 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 664 movq(dst, rscratch); 665 } 666 667 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 668 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 669 } 670 671 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 672 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 673 movq(dst, rscratch); 674 } 675 676 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 677 if (src.is_lval()) { 678 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 679 } else { 680 if (reachable(src)) { 681 movq(dst, as_Address(src)); 682 } else { 683 lea(dst, src); 684 movq(dst, Address(dst, 0)); 685 } 686 } 687 } 688 689 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 690 movq(as_Address(dst, rscratch), src); 691 } 692 693 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 694 movq(dst, as_Address(src, dst /*rscratch*/)); 695 } 696 697 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 698 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 699 if (is_simm32(src)) { 700 movptr(dst, checked_cast<int32_t>(src)); 701 } else { 702 mov64(rscratch, src); 703 movq(dst, rscratch); 704 } 705 } 706 707 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 708 movoop(rscratch, obj); 709 push(rscratch); 710 } 711 712 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 713 mov_metadata(rscratch, obj); 714 push(rscratch); 715 } 716 717 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 718 lea(rscratch, src); 719 if (src.is_lval()) { 720 push(rscratch); 721 } else { 722 pushq(Address(rscratch, 0)); 723 } 724 } 725 726 void MacroAssembler::reset_last_Java_frame(bool clear_fp) { 727 reset_last_Java_frame(r15_thread, clear_fp); 728 } 729 730 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 731 Register last_java_fp, 732 address last_java_pc, 733 Register rscratch) { 734 set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch); 735 } 736 737 static void pass_arg0(MacroAssembler* masm, Register arg) { 738 if (c_rarg0 != arg ) { 739 masm->mov(c_rarg0, arg); 740 } 741 } 742 743 static void pass_arg1(MacroAssembler* masm, Register arg) { 744 if (c_rarg1 != arg ) { 745 masm->mov(c_rarg1, arg); 746 } 747 } 748 749 static void pass_arg2(MacroAssembler* masm, Register arg) { 750 if (c_rarg2 != arg ) { 751 masm->mov(c_rarg2, arg); 752 } 753 } 754 755 static void pass_arg3(MacroAssembler* masm, Register arg) { 756 if (c_rarg3 != arg ) { 757 masm->mov(c_rarg3, arg); 758 } 759 } 760 761 void MacroAssembler::stop(const char* msg) { 762 if (ShowMessageBoxOnError) { 763 address rip = pc(); 764 pusha(); // get regs on stack 765 lea(c_rarg1, InternalAddress(rip)); 766 movq(c_rarg2, rsp); // pass pointer to regs array 767 } 768 lea(c_rarg0, ExternalAddress((address) msg)); 769 andq(rsp, -16); // align stack as required by ABI 770 
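  // debug64 expects (char* msg, int64_t pc, int64_t regs[]); c_rarg1 (pc) and
  // c_rarg2 (the saved-register array) are only set up above when
  // ShowMessageBoxOnError is enabled.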
call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 771 hlt(); 772 } 773 774 void MacroAssembler::warn(const char* msg) { 775 push(rbp); 776 movq(rbp, rsp); 777 andq(rsp, -16); // align stack as required by push_CPU_state and call 778 push_CPU_state(); // keeps alignment at 16 bytes 779 780 lea(c_rarg0, ExternalAddress((address) msg)); 781 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning))); 782 783 pop_CPU_state(); 784 mov(rsp, rbp); 785 pop(rbp); 786 } 787 788 void MacroAssembler::print_state() { 789 address rip = pc(); 790 pusha(); // get regs on stack 791 push(rbp); 792 movq(rbp, rsp); 793 andq(rsp, -16); // align stack as required by push_CPU_state and call 794 push_CPU_state(); // keeps alignment at 16 bytes 795 796 lea(c_rarg0, InternalAddress(rip)); 797 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array 798 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1); 799 800 pop_CPU_state(); 801 mov(rsp, rbp); 802 pop(rbp); 803 popa(); 804 } 805 806 #ifndef PRODUCT 807 extern "C" void findpc(intptr_t x); 808 #endif 809 810 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) { 811 // In order to get locks to work, we need to fake a in_VM state 812 if (ShowMessageBoxOnError) { 813 JavaThread* thread = JavaThread::current(); 814 JavaThreadState saved_state = thread->thread_state(); 815 thread->set_thread_state(_thread_in_vm); 816 #ifndef PRODUCT 817 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { 818 ttyLocker ttyl; 819 BytecodeCounter::print(); 820 } 821 #endif 822 // To see where a verify_oop failed, get $ebx+40/X for this frame. 823 // XXX correct this offset for amd64 824 // This is the value of eip which points to where verify_oop will return. 825 if (os::message_box(msg, "Execution stopped, print registers?")) { 826 print_state64(pc, regs); 827 BREAKPOINT; 828 } 829 } 830 fatal("DEBUG MESSAGE: %s", msg); 831 } 832 833 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) { 834 ttyLocker ttyl; 835 DebuggingContext debugging{}; 836 tty->print_cr("rip = 0x%016lx", (intptr_t)pc); 837 #ifndef PRODUCT 838 tty->cr(); 839 findpc(pc); 840 tty->cr(); 841 #endif 842 #define PRINT_REG(rax, value) \ 843 { tty->print("%s = ", #rax); os::print_location(tty, value); } 844 PRINT_REG(rax, regs[15]); 845 PRINT_REG(rbx, regs[12]); 846 PRINT_REG(rcx, regs[14]); 847 PRINT_REG(rdx, regs[13]); 848 PRINT_REG(rdi, regs[8]); 849 PRINT_REG(rsi, regs[9]); 850 PRINT_REG(rbp, regs[10]); 851 // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp 852 PRINT_REG(rsp, (intptr_t)(®s[16])); 853 PRINT_REG(r8 , regs[7]); 854 PRINT_REG(r9 , regs[6]); 855 PRINT_REG(r10, regs[5]); 856 PRINT_REG(r11, regs[4]); 857 PRINT_REG(r12, regs[3]); 858 PRINT_REG(r13, regs[2]); 859 PRINT_REG(r14, regs[1]); 860 PRINT_REG(r15, regs[0]); 861 #undef PRINT_REG 862 // Print some words near the top of the stack. 
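  // pusha() pushed rax first and r15 last, so regs[0] holds r15 and regs[15]
  // holds rax (matching the PRINT_REG indices above), and &regs[16] is the
  // stack pointer value from before the pusha().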
863 int64_t* rsp = ®s[16]; 864 int64_t* dump_sp = rsp; 865 for (int col1 = 0; col1 < 8; col1++) { 866 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 867 os::print_location(tty, *dump_sp++); 868 } 869 for (int row = 0; row < 25; row++) { 870 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp); 871 for (int col = 0; col < 4; col++) { 872 tty->print(" 0x%016lx", (intptr_t)*dump_sp++); 873 } 874 tty->cr(); 875 } 876 // Print some instructions around pc: 877 Disassembler::decode((address)pc-64, (address)pc); 878 tty->print_cr("--------"); 879 Disassembler::decode((address)pc, (address)pc+32); 880 } 881 882 // The java_calling_convention describes stack locations as ideal slots on 883 // a frame with no abi restrictions. Since we must observe abi restrictions 884 // (like the placement of the register window) the slots must be biased by 885 // the following value. 886 static int reg2offset_in(VMReg r) { 887 // Account for saved rbp and return address 888 // This should really be in_preserve_stack_slots 889 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size; 890 } 891 892 static int reg2offset_out(VMReg r) { 893 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 894 } 895 896 // A long move 897 void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 898 899 // The calling conventions assures us that each VMregpair is either 900 // all really one physical register or adjacent stack slots. 901 902 if (src.is_single_phys_reg() ) { 903 if (dst.is_single_phys_reg()) { 904 if (dst.first() != src.first()) { 905 mov(dst.first()->as_Register(), src.first()->as_Register()); 906 } 907 } else { 908 assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)", 909 src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name()); 910 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register()); 911 } 912 } else if (dst.is_single_phys_reg()) { 913 assert(src.is_single_reg(), "not a stack pair"); 914 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 915 } else { 916 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs"); 917 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 918 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 919 } 920 } 921 922 // A double move 923 void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 924 925 // The calling conventions assures us that each VMregpair is either 926 // all really one physical register or adjacent stack slots. 
927 928 if (src.is_single_phys_reg() ) { 929 if (dst.is_single_phys_reg()) { 930 // In theory these overlap but the ordering is such that this is likely a nop 931 if ( src.first() != dst.first()) { 932 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister()); 933 } 934 } else { 935 assert(dst.is_single_reg(), "not a stack pair"); 936 movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister()); 937 } 938 } else if (dst.is_single_phys_reg()) { 939 assert(src.is_single_reg(), "not a stack pair"); 940 movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 941 } else { 942 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs"); 943 movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 944 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 945 } 946 } 947 948 949 // A float arg may have to do float reg int reg conversion 950 void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 951 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); 952 953 // The calling conventions assures us that each VMregpair is either 954 // all really one physical register or adjacent stack slots. 955 956 if (src.first()->is_stack()) { 957 if (dst.first()->is_stack()) { 958 movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 959 movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 960 } else { 961 // stack to reg 962 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters"); 963 movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 964 } 965 } else if (dst.first()->is_stack()) { 966 // reg to stack 967 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters"); 968 movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister()); 969 } else { 970 // reg to reg 971 // In theory these overlap but the ordering is such that this is likely a nop 972 if ( src.first() != dst.first()) { 973 movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister()); 974 } 975 } 976 } 977 978 // On 64 bit we will store integer like items to the stack as 979 // 64 bits items (x86_32/64 abi) even though java would only store 980 // 32bits for a parameter. On 32bit it will simply be 32 bits 981 // So this routine will do 32->32 on 32bit and 32->64 on 64bit 982 void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) { 983 if (src.first()->is_stack()) { 984 if (dst.first()->is_stack()) { 985 // stack to stack 986 movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 987 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp); 988 } else { 989 // stack to reg 990 movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias)); 991 } 992 } else if (dst.first()->is_stack()) { 993 // reg to stack 994 // Do we really have to sign extend??? 995 // __ movslq(src.first()->as_Register(), src.first()->as_Register()); 996 movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register()); 997 } else { 998 // Do we really have to sign extend??? 
999 // __ movslq(dst.first()->as_Register(), src.first()->as_Register()); 1000 if (dst.first() != src.first()) { 1001 movq(dst.first()->as_Register(), src.first()->as_Register()); 1002 } 1003 } 1004 } 1005 1006 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) { 1007 if (src.first()->is_stack()) { 1008 if (dst.first()->is_stack()) { 1009 // stack to stack 1010 movq(rax, Address(rbp, reg2offset_in(src.first()))); 1011 movq(Address(rsp, reg2offset_out(dst.first())), rax); 1012 } else { 1013 // stack to reg 1014 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); 1015 } 1016 } else if (dst.first()->is_stack()) { 1017 // reg to stack 1018 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); 1019 } else { 1020 if (dst.first() != src.first()) { 1021 movq(dst.first()->as_Register(), src.first()->as_Register()); 1022 } 1023 } 1024 } 1025 1026 // An oop arg. Must pass a handle not the oop itself 1027 void MacroAssembler::object_move(OopMap* map, 1028 int oop_handle_offset, 1029 int framesize_in_slots, 1030 VMRegPair src, 1031 VMRegPair dst, 1032 bool is_receiver, 1033 int* receiver_offset) { 1034 1035 // must pass a handle. First figure out the location we use as a handle 1036 1037 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register(); 1038 1039 // See if oop is null if it is we need no handle 1040 1041 if (src.first()->is_stack()) { 1042 1043 // Oop is already on the stack as an argument 1044 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1045 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 1046 if (is_receiver) { 1047 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 1048 } 1049 1050 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD); 1051 lea(rHandle, Address(rbp, reg2offset_in(src.first()))); 1052 // conditionally move a null 1053 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); 1054 } else { 1055 1056 // Oop is in a register we must store it to the space we reserve 1057 // on the stack for oop_handles and pass a handle if oop is non-null 1058 1059 const Register rOop = src.first()->as_Register(); 1060 int oop_slot; 1061 if (rOop == j_rarg0) 1062 oop_slot = 0; 1063 else if (rOop == j_rarg1) 1064 oop_slot = 1; 1065 else if (rOop == j_rarg2) 1066 oop_slot = 2; 1067 else if (rOop == j_rarg3) 1068 oop_slot = 3; 1069 else if (rOop == j_rarg4) 1070 oop_slot = 4; 1071 else { 1072 assert(rOop == j_rarg5, "wrong register"); 1073 oop_slot = 5; 1074 } 1075 1076 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 1077 int offset = oop_slot*VMRegImpl::stack_slot_size; 1078 1079 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1080 // Store oop in handle area, may be null 1081 movptr(Address(rsp, offset), rOop); 1082 if (is_receiver) { 1083 *receiver_offset = offset; 1084 } 1085 1086 cmpptr(rOop, NULL_WORD); 1087 lea(rHandle, Address(rsp, offset)); 1088 // conditionally move a null from the handle area where it was just stored 1089 cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); 1090 } 1091 1092 // If arg is on the stack then place it otherwise it is already in correct reg. 
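  // At this point rHandle is null if the oop was null; otherwise it points at
  // the stack slot holding the oop, i.e. it is a valid handle.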
1093 if (dst.first()->is_stack()) { 1094 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); 1095 } 1096 } 1097 1098 #endif // _LP64 1099 1100 // Now versions that are common to 32/64 bit 1101 1102 void MacroAssembler::addptr(Register dst, int32_t imm32) { 1103 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32)); 1104 } 1105 1106 void MacroAssembler::addptr(Register dst, Register src) { 1107 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); 1108 } 1109 1110 void MacroAssembler::addptr(Address dst, Register src) { 1111 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); 1112 } 1113 1114 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1115 assert(rscratch != noreg || always_reachable(src), "missing"); 1116 1117 if (reachable(src)) { 1118 Assembler::addsd(dst, as_Address(src)); 1119 } else { 1120 lea(rscratch, src); 1121 Assembler::addsd(dst, Address(rscratch, 0)); 1122 } 1123 } 1124 1125 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1126 assert(rscratch != noreg || always_reachable(src), "missing"); 1127 1128 if (reachable(src)) { 1129 addss(dst, as_Address(src)); 1130 } else { 1131 lea(rscratch, src); 1132 addss(dst, Address(rscratch, 0)); 1133 } 1134 } 1135 1136 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1137 assert(rscratch != noreg || always_reachable(src), "missing"); 1138 1139 if (reachable(src)) { 1140 Assembler::addpd(dst, as_Address(src)); 1141 } else { 1142 lea(rscratch, src); 1143 Assembler::addpd(dst, Address(rscratch, 0)); 1144 } 1145 } 1146 1147 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only. 1148 // Stub code is generated once and never copied. 1149 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes. 1150 void MacroAssembler::align64() { 1151 align(64, (unsigned long long) pc()); 1152 } 1153 1154 void MacroAssembler::align32() { 1155 align(32, (unsigned long long) pc()); 1156 } 1157 1158 void MacroAssembler::align(int modulus) { 1159 // 8273459: Ensure alignment is possible with current segment alignment 1160 assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment"); 1161 align(modulus, offset()); 1162 } 1163 1164 void MacroAssembler::align(int modulus, int target) { 1165 if (target % modulus != 0) { 1166 nop(modulus - (target % modulus)); 1167 } 1168 } 1169 1170 void MacroAssembler::push_f(XMMRegister r) { 1171 subptr(rsp, wordSize); 1172 movflt(Address(rsp, 0), r); 1173 } 1174 1175 void MacroAssembler::pop_f(XMMRegister r) { 1176 movflt(r, Address(rsp, 0)); 1177 addptr(rsp, wordSize); 1178 } 1179 1180 void MacroAssembler::push_d(XMMRegister r) { 1181 subptr(rsp, 2 * wordSize); 1182 movdbl(Address(rsp, 0), r); 1183 } 1184 1185 void MacroAssembler::pop_d(XMMRegister r) { 1186 movdbl(r, Address(rsp, 0)); 1187 addptr(rsp, 2 * Interpreter::stackElementSize); 1188 } 1189 1190 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1191 // Used in sign-masking with aligned address. 
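  // (A typical mask clears the sign bit, e.g. 0x7fffffffffffffff for a
  // double absolute value.)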
1192 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 1193 assert(rscratch != noreg || always_reachable(src), "missing"); 1194 1195 if (reachable(src)) { 1196 Assembler::andpd(dst, as_Address(src)); 1197 } else { 1198 lea(rscratch, src); 1199 Assembler::andpd(dst, Address(rscratch, 0)); 1200 } 1201 } 1202 1203 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) { 1204 // Used in sign-masking with aligned address. 1205 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 1206 assert(rscratch != noreg || always_reachable(src), "missing"); 1207 1208 if (reachable(src)) { 1209 Assembler::andps(dst, as_Address(src)); 1210 } else { 1211 lea(rscratch, src); 1212 Assembler::andps(dst, Address(rscratch, 0)); 1213 } 1214 } 1215 1216 void MacroAssembler::andptr(Register dst, int32_t imm32) { 1217 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32)); 1218 } 1219 1220 #ifdef _LP64 1221 void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) { 1222 assert(rscratch != noreg || always_reachable(src), "missing"); 1223 1224 if (reachable(src)) { 1225 andq(dst, as_Address(src)); 1226 } else { 1227 lea(rscratch, src); 1228 andq(dst, Address(rscratch, 0)); 1229 } 1230 } 1231 #endif 1232 1233 void MacroAssembler::atomic_incl(Address counter_addr) { 1234 lock(); 1235 incrementl(counter_addr); 1236 } 1237 1238 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) { 1239 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1240 1241 if (reachable(counter_addr)) { 1242 atomic_incl(as_Address(counter_addr)); 1243 } else { 1244 lea(rscratch, counter_addr); 1245 atomic_incl(Address(rscratch, 0)); 1246 } 1247 } 1248 1249 #ifdef _LP64 1250 void MacroAssembler::atomic_incq(Address counter_addr) { 1251 lock(); 1252 incrementq(counter_addr); 1253 } 1254 1255 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) { 1256 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1257 1258 if (reachable(counter_addr)) { 1259 atomic_incq(as_Address(counter_addr)); 1260 } else { 1261 lea(rscratch, counter_addr); 1262 atomic_incq(Address(rscratch, 0)); 1263 } 1264 } 1265 #endif 1266 1267 // Writes to stack successive pages until offset reached to check for 1268 // stack overflow + shadow pages. This clobbers tmp. 1269 void MacroAssembler::bang_stack_size(Register size, Register tmp) { 1270 movptr(tmp, rsp); 1271 // Bang stack for total size given plus shadow page size. 1272 // Bang one page at a time because large size can bang beyond yellow and 1273 // red zones. 1274 Label loop; 1275 bind(loop); 1276 movl(Address(tmp, (-(int)os::vm_page_size())), size ); 1277 subptr(tmp, (int)os::vm_page_size()); 1278 subl(size, (int)os::vm_page_size()); 1279 jcc(Assembler::greater, loop); 1280 1281 // Bang down shadow pages too. 1282 // At this point, (tmp-0) is the last address touched, so don't 1283 // touch it again. (It was touched as (tmp-pagesize) but then tmp 1284 // was post-decremented.) Skip this address by starting at i=1, and 1285 // touch a few more pages below. N.B. It is important to touch all 1286 // the way down including all pages in the shadow zone. 1287 for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) { 1288 // this could be any sized move but this is can be a debugging crumb 1289 // so the bigger the better. 
1290 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size ); 1291 } 1292 } 1293 1294 void MacroAssembler::reserved_stack_check() { 1295 // testing if reserved zone needs to be enabled 1296 Label no_reserved_zone_enabling; 1297 Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 1298 NOT_LP64(get_thread(rsi);) 1299 1300 cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset())); 1301 jcc(Assembler::below, no_reserved_zone_enabling); 1302 1303 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread); 1304 jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry())); 1305 should_not_reach_here(); 1306 1307 bind(no_reserved_zone_enabling); 1308 } 1309 1310 void MacroAssembler::c2bool(Register x) { 1311 // implements x == 0 ? 0 : 1 1312 // note: must only look at least-significant byte of x 1313 // since C-style booleans are stored in one byte 1314 // only! (was bug) 1315 andl(x, 0xFF); 1316 setb(Assembler::notZero, x); 1317 } 1318 1319 // Wouldn't need if AddressLiteral version had new name 1320 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 1321 Assembler::call(L, rtype); 1322 } 1323 1324 void MacroAssembler::call(Register entry) { 1325 Assembler::call(entry); 1326 } 1327 1328 void MacroAssembler::call(AddressLiteral entry, Register rscratch) { 1329 assert(rscratch != noreg || always_reachable(entry), "missing"); 1330 1331 if (reachable(entry)) { 1332 Assembler::call_literal(entry.target(), entry.rspec()); 1333 } else { 1334 lea(rscratch, entry); 1335 Assembler::call(rscratch); 1336 } 1337 } 1338 1339 void MacroAssembler::ic_call(address entry, jint method_index) { 1340 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 1341 movptr(rax, (intptr_t)Universe::non_oop_word()); 1342 call(AddressLiteral(entry, rh)); 1343 } 1344 1345 void MacroAssembler::emit_static_call_stub() { 1346 // Static stub relocation also tags the Method* in the code-stream. 1347 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time. 1348 // This is recognized as unresolved by relocs/nativeinst/ic code. 
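  // The jump initially targets its own address; it is patched to the resolved
  // entry point when the call site is bound.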
1349 jump(RuntimeAddress(pc())); 1350 } 1351 1352 // Implementation of call_VM versions 1353 1354 void MacroAssembler::call_VM(Register oop_result, 1355 address entry_point, 1356 bool check_exceptions) { 1357 Label C, E; 1358 call(C, relocInfo::none); 1359 jmp(E); 1360 1361 bind(C); 1362 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 1363 ret(0); 1364 1365 bind(E); 1366 } 1367 1368 void MacroAssembler::call_VM(Register oop_result, 1369 address entry_point, 1370 Register arg_1, 1371 bool check_exceptions) { 1372 Label C, E; 1373 call(C, relocInfo::none); 1374 jmp(E); 1375 1376 bind(C); 1377 pass_arg1(this, arg_1); 1378 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 1379 ret(0); 1380 1381 bind(E); 1382 } 1383 1384 void MacroAssembler::call_VM(Register oop_result, 1385 address entry_point, 1386 Register arg_1, 1387 Register arg_2, 1388 bool check_exceptions) { 1389 Label C, E; 1390 call(C, relocInfo::none); 1391 jmp(E); 1392 1393 bind(C); 1394 1395 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1396 1397 pass_arg2(this, arg_2); 1398 pass_arg1(this, arg_1); 1399 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 1400 ret(0); 1401 1402 bind(E); 1403 } 1404 1405 void MacroAssembler::call_VM(Register oop_result, 1406 address entry_point, 1407 Register arg_1, 1408 Register arg_2, 1409 Register arg_3, 1410 bool check_exceptions) { 1411 Label C, E; 1412 call(C, relocInfo::none); 1413 jmp(E); 1414 1415 bind(C); 1416 1417 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1418 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1419 pass_arg3(this, arg_3); 1420 pass_arg2(this, arg_2); 1421 pass_arg1(this, arg_1); 1422 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 1423 ret(0); 1424 1425 bind(E); 1426 } 1427 1428 void MacroAssembler::call_VM(Register oop_result, 1429 Register last_java_sp, 1430 address entry_point, 1431 int number_of_arguments, 1432 bool check_exceptions) { 1433 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 1434 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1435 } 1436 1437 void MacroAssembler::call_VM(Register oop_result, 1438 Register last_java_sp, 1439 address entry_point, 1440 Register arg_1, 1441 bool check_exceptions) { 1442 pass_arg1(this, arg_1); 1443 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1444 } 1445 1446 void MacroAssembler::call_VM(Register oop_result, 1447 Register last_java_sp, 1448 address entry_point, 1449 Register arg_1, 1450 Register arg_2, 1451 bool check_exceptions) { 1452 1453 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1454 pass_arg2(this, arg_2); 1455 pass_arg1(this, arg_1); 1456 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1457 } 1458 1459 void MacroAssembler::call_VM(Register oop_result, 1460 Register last_java_sp, 1461 address entry_point, 1462 Register arg_1, 1463 Register arg_2, 1464 Register arg_3, 1465 bool check_exceptions) { 1466 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1467 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1468 pass_arg3(this, arg_3); 1469 pass_arg2(this, arg_2); 1470 pass_arg1(this, arg_1); 1471 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1472 } 1473 1474 void MacroAssembler::super_call_VM(Register oop_result, 1475 Register last_java_sp, 1476 address entry_point, 1477 int number_of_arguments, 1478 bool check_exceptions) { 1479 Register thread = LP64_ONLY(r15_thread) 
NOT_LP64(noreg); 1480 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1481 } 1482 1483 void MacroAssembler::super_call_VM(Register oop_result, 1484 Register last_java_sp, 1485 address entry_point, 1486 Register arg_1, 1487 bool check_exceptions) { 1488 pass_arg1(this, arg_1); 1489 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1490 } 1491 1492 void MacroAssembler::super_call_VM(Register oop_result, 1493 Register last_java_sp, 1494 address entry_point, 1495 Register arg_1, 1496 Register arg_2, 1497 bool check_exceptions) { 1498 1499 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1500 pass_arg2(this, arg_2); 1501 pass_arg1(this, arg_1); 1502 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1503 } 1504 1505 void MacroAssembler::super_call_VM(Register oop_result, 1506 Register last_java_sp, 1507 address entry_point, 1508 Register arg_1, 1509 Register arg_2, 1510 Register arg_3, 1511 bool check_exceptions) { 1512 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1513 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1514 pass_arg3(this, arg_3); 1515 pass_arg2(this, arg_2); 1516 pass_arg1(this, arg_1); 1517 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1518 } 1519 1520 void MacroAssembler::call_VM_base(Register oop_result, 1521 Register java_thread, 1522 Register last_java_sp, 1523 address entry_point, 1524 int number_of_arguments, 1525 bool check_exceptions) { 1526 // determine java_thread register 1527 if (!java_thread->is_valid()) { 1528 #ifdef _LP64 1529 java_thread = r15_thread; 1530 #else 1531 java_thread = rdi; 1532 get_thread(java_thread); 1533 #endif // LP64 1534 } 1535 // determine last_java_sp register 1536 if (!last_java_sp->is_valid()) { 1537 last_java_sp = rsp; 1538 } 1539 // debugging support 1540 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 1541 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 1542 #ifdef ASSERT 1543 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 1544 // r12 is the heapbase. 1545 LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) 1546 #endif // ASSERT 1547 1548 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 1549 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 1550 1551 // push java thread (becomes first argument of C function) 1552 1553 NOT_LP64(push(java_thread); number_of_arguments++); 1554 LP64_ONLY(mov(c_rarg0, r15_thread)); 1555 1556 // set last Java frame before call 1557 assert(last_java_sp != rbp, "can't use ebp/rbp"); 1558 1559 // Only interpreter should have to set fp 1560 set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1); 1561 1562 // do the call, remove parameters 1563 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 1564 1565 // restore the thread (cannot use the pushed argument since arguments 1566 // may be overwritten by C code generated by an optimizing compiler); 1567 // however can use the register value directly if it is callee saved. 
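  // On 64-bit, java_thread is always r15, which is callee-saved, so the
  // LP64_ONLY(true ||) below makes this branch unconditional there.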
1568 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) { 1569 // rdi & rsi (also r15) are callee saved -> nothing to do 1570 #ifdef ASSERT 1571 guarantee(java_thread != rax, "change this code"); 1572 push(rax); 1573 { Label L; 1574 get_thread(rax); 1575 cmpptr(java_thread, rax); 1576 jcc(Assembler::equal, L); 1577 STOP("MacroAssembler::call_VM_base: rdi not callee saved?"); 1578 bind(L); 1579 } 1580 pop(rax); 1581 #endif 1582 } else { 1583 get_thread(java_thread); 1584 } 1585 // reset last Java frame 1586 // Only interpreter should have to clear fp 1587 reset_last_Java_frame(java_thread, true); 1588 1589 // C++ interp handles this in the interpreter 1590 check_and_handle_popframe(java_thread); 1591 check_and_handle_earlyret(java_thread); 1592 1593 if (check_exceptions) { 1594 // check for pending exceptions (java_thread is set upon return) 1595 cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD); 1596 #ifndef _LP64 1597 jump_cc(Assembler::notEqual, 1598 RuntimeAddress(StubRoutines::forward_exception_entry())); 1599 #else 1600 // This used to conditionally jump to forward_exception however it is 1601 // possible if we relocate that the branch will not reach. So we must jump 1602 // around so we can always reach 1603 1604 Label ok; 1605 jcc(Assembler::equal, ok); 1606 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 1607 bind(ok); 1608 #endif // LP64 1609 } 1610 1611 // get oop result if there is one and reset the value in the thread 1612 if (oop_result->is_valid()) { 1613 get_vm_result(oop_result, java_thread); 1614 } 1615 } 1616 1617 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { 1618 1619 // Calculate the value for last_Java_sp 1620 // somewhat subtle. call_VM does an intermediate call 1621 // which places a return address on the stack just under the 1622 // stack pointer as the user finished with it. This allows 1623 // use to retrieve last_Java_pc from last_Java_sp[-1]. 1624 // On 32bit we then have to push additional args on the stack to accomplish 1625 // the actual requested call. On 64bit call_VM only can use register args 1626 // so the only extra space is the return address that call_VM created. 1627 // This hopefully explains the calculations here. 1628 1629 #ifdef _LP64 1630 // We've pushed one address, correct last_Java_sp 1631 lea(rax, Address(rsp, wordSize)); 1632 #else 1633 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize)); 1634 #endif // LP64 1635 1636 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions); 1637 1638 } 1639 1640 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter. 
1641 void MacroAssembler::call_VM_leaf0(address entry_point) { 1642 MacroAssembler::call_VM_leaf_base(entry_point, 0); 1643 } 1644 1645 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 1646 call_VM_leaf_base(entry_point, number_of_arguments); 1647 } 1648 1649 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 1650 pass_arg0(this, arg_0); 1651 call_VM_leaf(entry_point, 1); 1652 } 1653 1654 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1655 1656 LP64_ONLY(assert_different_registers(arg_0, c_rarg1)); 1657 pass_arg1(this, arg_1); 1658 pass_arg0(this, arg_0); 1659 call_VM_leaf(entry_point, 2); 1660 } 1661 1662 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1663 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2)); 1664 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1665 pass_arg2(this, arg_2); 1666 pass_arg1(this, arg_1); 1667 pass_arg0(this, arg_0); 1668 call_VM_leaf(entry_point, 3); 1669 } 1670 1671 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1672 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3)); 1673 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1674 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1675 pass_arg3(this, arg_3); 1676 pass_arg2(this, arg_2); 1677 pass_arg1(this, arg_1); 1678 pass_arg0(this, arg_0); 1679 call_VM_leaf(entry_point, 3); 1680 } 1681 1682 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 1683 pass_arg0(this, arg_0); 1684 MacroAssembler::call_VM_leaf_base(entry_point, 1); 1685 } 1686 1687 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 1688 LP64_ONLY(assert_different_registers(arg_0, c_rarg1)); 1689 pass_arg1(this, arg_1); 1690 pass_arg0(this, arg_0); 1691 MacroAssembler::call_VM_leaf_base(entry_point, 2); 1692 } 1693 1694 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 1695 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2)); 1696 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1697 pass_arg2(this, arg_2); 1698 pass_arg1(this, arg_1); 1699 pass_arg0(this, arg_0); 1700 MacroAssembler::call_VM_leaf_base(entry_point, 3); 1701 } 1702 1703 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 1704 LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3)); 1705 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1706 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1707 pass_arg3(this, arg_3); 1708 pass_arg2(this, arg_2); 1709 pass_arg1(this, arg_1); 1710 pass_arg0(this, arg_0); 1711 MacroAssembler::call_VM_leaf_base(entry_point, 4); 1712 } 1713 1714 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 1715 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 1716 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD); 1717 verify_oop_msg(oop_result, "broken oop in call_VM_base"); 1718 } 1719 1720 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 1721 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 1722 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 1723 } 1724 
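// These base-class versions are intentionally empty; the interpreter's
// MacroAssembler subclass overrides them to handle popframe/earlyret.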
1725 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1726 } 1727 1728 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1729 } 1730 1731 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1732 assert(rscratch != noreg || always_reachable(src1), "missing"); 1733 1734 if (reachable(src1)) { 1735 cmpl(as_Address(src1), imm); 1736 } else { 1737 lea(rscratch, src1); 1738 cmpl(Address(rscratch, 0), imm); 1739 } 1740 } 1741 1742 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1743 assert(!src2.is_lval(), "use cmpptr"); 1744 assert(rscratch != noreg || always_reachable(src2), "missing"); 1745 1746 if (reachable(src2)) { 1747 cmpl(src1, as_Address(src2)); 1748 } else { 1749 lea(rscratch, src2); 1750 cmpl(src1, Address(rscratch, 0)); 1751 } 1752 } 1753 1754 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1755 Assembler::cmpl(src1, imm); 1756 } 1757 1758 void MacroAssembler::cmp32(Register src1, Address src2) { 1759 Assembler::cmpl(src1, src2); 1760 } 1761 1762 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1763 ucomisd(opr1, opr2); 1764 1765 Label L; 1766 if (unordered_is_less) { 1767 movl(dst, -1); 1768 jcc(Assembler::parity, L); 1769 jcc(Assembler::below , L); 1770 movl(dst, 0); 1771 jcc(Assembler::equal , L); 1772 increment(dst); 1773 } else { // unordered is greater 1774 movl(dst, 1); 1775 jcc(Assembler::parity, L); 1776 jcc(Assembler::above , L); 1777 movl(dst, 0); 1778 jcc(Assembler::equal , L); 1779 decrementl(dst); 1780 } 1781 bind(L); 1782 } 1783 1784 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1785 ucomiss(opr1, opr2); 1786 1787 Label L; 1788 if (unordered_is_less) { 1789 movl(dst, -1); 1790 jcc(Assembler::parity, L); 1791 jcc(Assembler::below , L); 1792 movl(dst, 0); 1793 jcc(Assembler::equal , L); 1794 increment(dst); 1795 } else { // unordered is greater 1796 movl(dst, 1); 1797 jcc(Assembler::parity, L); 1798 jcc(Assembler::above , L); 1799 movl(dst, 0); 1800 jcc(Assembler::equal , L); 1801 decrementl(dst); 1802 } 1803 bind(L); 1804 } 1805 1806 1807 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1808 assert(rscratch != noreg || always_reachable(src1), "missing"); 1809 1810 if (reachable(src1)) { 1811 cmpb(as_Address(src1), imm); 1812 } else { 1813 lea(rscratch, src1); 1814 cmpb(Address(rscratch, 0), imm); 1815 } 1816 } 1817 1818 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1819 #ifdef _LP64 1820 assert(rscratch != noreg || always_reachable(src2), "missing"); 1821 1822 if (src2.is_lval()) { 1823 movptr(rscratch, src2); 1824 Assembler::cmpq(src1, rscratch); 1825 } else if (reachable(src2)) { 1826 cmpq(src1, as_Address(src2)); 1827 } else { 1828 lea(rscratch, src2); 1829 Assembler::cmpq(src1, Address(rscratch, 0)); 1830 } 1831 #else 1832 assert(rscratch == noreg, "not needed"); 1833 if (src2.is_lval()) { 1834 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1835 } else { 1836 cmpl(src1, as_Address(src2)); 1837 } 1838 #endif // _LP64 1839 } 1840 1841 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1842 assert(src2.is_lval(), "not a mem-mem compare"); 1843 #ifdef _LP64 1844 // moves src2's literal address 1845 movptr(rscratch, src2); 1846 Assembler::cmpq(src1, rscratch); 1847 #else 1848 assert(rscratch == noreg, "not needed"); 1849 
cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1850 #endif // _LP64 1851 } 1852 1853 void MacroAssembler::cmpoop(Register src1, Register src2) { 1854 cmpptr(src1, src2); 1855 } 1856 1857 void MacroAssembler::cmpoop(Register src1, Address src2) { 1858 cmpptr(src1, src2); 1859 } 1860 1861 #ifdef _LP64 1862 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1863 movoop(rscratch, src2); 1864 cmpptr(src1, rscratch); 1865 } 1866 #endif 1867 1868 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1869 assert(rscratch != noreg || always_reachable(adr), "missing"); 1870 1871 if (reachable(adr)) { 1872 lock(); 1873 cmpxchgptr(reg, as_Address(adr)); 1874 } else { 1875 lea(rscratch, adr); 1876 lock(); 1877 cmpxchgptr(reg, Address(rscratch, 0)); 1878 } 1879 } 1880 1881 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1882 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1883 } 1884 1885 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1886 assert(rscratch != noreg || always_reachable(src), "missing"); 1887 1888 if (reachable(src)) { 1889 Assembler::comisd(dst, as_Address(src)); 1890 } else { 1891 lea(rscratch, src); 1892 Assembler::comisd(dst, Address(rscratch, 0)); 1893 } 1894 } 1895 1896 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1897 assert(rscratch != noreg || always_reachable(src), "missing"); 1898 1899 if (reachable(src)) { 1900 Assembler::comiss(dst, as_Address(src)); 1901 } else { 1902 lea(rscratch, src); 1903 Assembler::comiss(dst, Address(rscratch, 0)); 1904 } 1905 } 1906 1907 1908 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1909 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1910 1911 Condition negated_cond = negate_condition(cond); 1912 Label L; 1913 jcc(negated_cond, L); 1914 pushf(); // Preserve flags 1915 atomic_incl(counter_addr, rscratch); 1916 popf(); 1917 bind(L); 1918 } 1919 1920 int MacroAssembler::corrected_idivl(Register reg) { 1921 // Full implementation of Java idiv and irem; checks for 1922 // special case as described in JVM spec., p.243 & p.271. 1923 // The function returns the (pc) offset of the idivl 1924 // instruction - may be needed for implicit exceptions. 
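// (The special case is min_int / -1: the quotient would overflow, and x86 idiv
// raises a divide-error fault for it, so that case is filtered out beforehand
// and the JVM-specified result of min_int with remainder 0 is produced.)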
1925 // 1926 // normal case special case 1927 // 1928 // input : rax,: dividend min_int 1929 // reg: divisor (may not be rax,/rdx) -1 1930 // 1931 // output: rax,: quotient (= rax, idiv reg) min_int 1932 // rdx: remainder (= rax, irem reg) 0 1933 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1934 const int min_int = 0x80000000; 1935 Label normal_case, special_case; 1936 1937 // check for special case 1938 cmpl(rax, min_int); 1939 jcc(Assembler::notEqual, normal_case); 1940 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1941 cmpl(reg, -1); 1942 jcc(Assembler::equal, special_case); 1943 1944 // handle normal case 1945 bind(normal_case); 1946 cdql(); 1947 int idivl_offset = offset(); 1948 idivl(reg); 1949 1950 // normal and special case exit 1951 bind(special_case); 1952 1953 return idivl_offset; 1954 } 1955 1956 1957 1958 void MacroAssembler::decrementl(Register reg, int value) { 1959 if (value == min_jint) {subl(reg, value) ; return; } 1960 if (value < 0) { incrementl(reg, -value); return; } 1961 if (value == 0) { ; return; } 1962 if (value == 1 && UseIncDec) { decl(reg) ; return; } 1963 /* else */ { subl(reg, value) ; return; } 1964 } 1965 1966 void MacroAssembler::decrementl(Address dst, int value) { 1967 if (value == min_jint) {subl(dst, value) ; return; } 1968 if (value < 0) { incrementl(dst, -value); return; } 1969 if (value == 0) { ; return; } 1970 if (value == 1 && UseIncDec) { decl(dst) ; return; } 1971 /* else */ { subl(dst, value) ; return; } 1972 } 1973 1974 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 1975 assert(shift_value > 0, "illegal shift value"); 1976 Label _is_positive; 1977 testl (reg, reg); 1978 jcc (Assembler::positive, _is_positive); 1979 int offset = (1 << shift_value) - 1 ; 1980 1981 if (offset == 1) { 1982 incrementl(reg); 1983 } else { 1984 addl(reg, offset); 1985 } 1986 1987 bind (_is_positive); 1988 sarl(reg, shift_value); 1989 } 1990 1991 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1992 assert(rscratch != noreg || always_reachable(src), "missing"); 1993 1994 if (reachable(src)) { 1995 Assembler::divsd(dst, as_Address(src)); 1996 } else { 1997 lea(rscratch, src); 1998 Assembler::divsd(dst, Address(rscratch, 0)); 1999 } 2000 } 2001 2002 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2003 assert(rscratch != noreg || always_reachable(src), "missing"); 2004 2005 if (reachable(src)) { 2006 Assembler::divss(dst, as_Address(src)); 2007 } else { 2008 lea(rscratch, src); 2009 Assembler::divss(dst, Address(rscratch, 0)); 2010 } 2011 } 2012 2013 void MacroAssembler::enter() { 2014 push(rbp); 2015 mov(rbp, rsp); 2016 } 2017 2018 void MacroAssembler::post_call_nop() { 2019 if (!Continuations::enabled()) { 2020 return; 2021 } 2022 InstructionMark im(this); 2023 relocate(post_call_nop_Relocation::spec()); 2024 InlineSkippedInstructionsCounter skipCounter(this); 2025 emit_int8((uint8_t)0x0f); 2026 emit_int8((uint8_t)0x1f); 2027 emit_int8((uint8_t)0x84); 2028 emit_int8((uint8_t)0x00); 2029 emit_int32(0x00); 2030 } 2031 2032 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2033 void MacroAssembler::fat_nop() { 2034 if (UseAddressNop) { 2035 addr_nop_5(); 2036 } else { 2037 emit_int8((uint8_t)0x26); // es: 2038 emit_int8((uint8_t)0x2e); // cs: 2039 emit_int8((uint8_t)0x64); // fs: 2040 emit_int8((uint8_t)0x65); // gs: 2041 emit_int8((uint8_t)0x90); 2042 } 2043 } 2044 2045 #ifndef _LP64 2046 void 
MacroAssembler::fcmp(Register tmp) { 2047 fcmp(tmp, 1, true, true); 2048 } 2049 2050 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2051 assert(!pop_right || pop_left, "usage error"); 2052 if (VM_Version::supports_cmov()) { 2053 assert(tmp == noreg, "unneeded temp"); 2054 if (pop_left) { 2055 fucomip(index); 2056 } else { 2057 fucomi(index); 2058 } 2059 if (pop_right) { 2060 fpop(); 2061 } 2062 } else { 2063 assert(tmp != noreg, "need temp"); 2064 if (pop_left) { 2065 if (pop_right) { 2066 fcompp(); 2067 } else { 2068 fcomp(index); 2069 } 2070 } else { 2071 fcom(index); 2072 } 2073 // convert FPU condition into eflags condition via rax, 2074 save_rax(tmp); 2075 fwait(); fnstsw_ax(); 2076 sahf(); 2077 restore_rax(tmp); 2078 } 2079 // condition codes set as follows: 2080 // 2081 // CF (corresponds to C0) if x < y 2082 // PF (corresponds to C2) if unordered 2083 // ZF (corresponds to C3) if x = y 2084 } 2085 2086 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2087 fcmp2int(dst, unordered_is_less, 1, true, true); 2088 } 2089 2090 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2091 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2092 Label L; 2093 if (unordered_is_less) { 2094 movl(dst, -1); 2095 jcc(Assembler::parity, L); 2096 jcc(Assembler::below , L); 2097 movl(dst, 0); 2098 jcc(Assembler::equal , L); 2099 increment(dst); 2100 } else { // unordered is greater 2101 movl(dst, 1); 2102 jcc(Assembler::parity, L); 2103 jcc(Assembler::above , L); 2104 movl(dst, 0); 2105 jcc(Assembler::equal , L); 2106 decrementl(dst); 2107 } 2108 bind(L); 2109 } 2110 2111 void MacroAssembler::fld_d(AddressLiteral src) { 2112 fld_d(as_Address(src)); 2113 } 2114 2115 void MacroAssembler::fld_s(AddressLiteral src) { 2116 fld_s(as_Address(src)); 2117 } 2118 2119 void MacroAssembler::fldcw(AddressLiteral src) { 2120 fldcw(as_Address(src)); 2121 } 2122 2123 void MacroAssembler::fpop() { 2124 ffree(); 2125 fincstp(); 2126 } 2127 2128 void MacroAssembler::fremr(Register tmp) { 2129 save_rax(tmp); 2130 { Label L; 2131 bind(L); 2132 fprem(); 2133 fwait(); fnstsw_ax(); 2134 sahf(); 2135 jcc(Assembler::parity, L); 2136 } 2137 restore_rax(tmp); 2138 // Result is in ST0. 
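// (fprem only performs a partial reduction and sets FPU flag C2 while further
// iterations are needed; sahf maps C2 onto the parity flag, which is why the
// loop above spins on Assembler::parity.)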
2139 // Note: fxch & fpop to get rid of ST1 2140 // (otherwise FPU stack could overflow eventually) 2141 fxch(1); 2142 fpop(); 2143 } 2144 2145 void MacroAssembler::empty_FPU_stack() { 2146 if (VM_Version::supports_mmx()) { 2147 emms(); 2148 } else { 2149 for (int i = 8; i-- > 0; ) ffree(i); 2150 } 2151 } 2152 #endif // !LP64 2153 2154 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2155 assert(rscratch != noreg || always_reachable(src), "missing"); 2156 if (reachable(src)) { 2157 Assembler::mulpd(dst, as_Address(src)); 2158 } else { 2159 lea(rscratch, src); 2160 Assembler::mulpd(dst, Address(rscratch, 0)); 2161 } 2162 } 2163 2164 void MacroAssembler::load_float(Address src) { 2165 #ifdef _LP64 2166 movflt(xmm0, src); 2167 #else 2168 if (UseSSE >= 1) { 2169 movflt(xmm0, src); 2170 } else { 2171 fld_s(src); 2172 } 2173 #endif // LP64 2174 } 2175 2176 void MacroAssembler::store_float(Address dst) { 2177 #ifdef _LP64 2178 movflt(dst, xmm0); 2179 #else 2180 if (UseSSE >= 1) { 2181 movflt(dst, xmm0); 2182 } else { 2183 fstp_s(dst); 2184 } 2185 #endif // LP64 2186 } 2187 2188 void MacroAssembler::load_double(Address src) { 2189 #ifdef _LP64 2190 movdbl(xmm0, src); 2191 #else 2192 if (UseSSE >= 2) { 2193 movdbl(xmm0, src); 2194 } else { 2195 fld_d(src); 2196 } 2197 #endif // LP64 2198 } 2199 2200 void MacroAssembler::store_double(Address dst) { 2201 #ifdef _LP64 2202 movdbl(dst, xmm0); 2203 #else 2204 if (UseSSE >= 2) { 2205 movdbl(dst, xmm0); 2206 } else { 2207 fstp_d(dst); 2208 } 2209 #endif // LP64 2210 } 2211 2212 // dst = c = a * b + c 2213 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2214 Assembler::vfmadd231sd(c, a, b); 2215 if (dst != c) { 2216 movdbl(dst, c); 2217 } 2218 } 2219 2220 // dst = c = a * b + c 2221 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2222 Assembler::vfmadd231ss(c, a, b); 2223 if (dst != c) { 2224 movflt(dst, c); 2225 } 2226 } 2227 2228 // dst = c = a * b + c 2229 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2230 Assembler::vfmadd231pd(c, a, b, vector_len); 2231 if (dst != c) { 2232 vmovdqu(dst, c); 2233 } 2234 } 2235 2236 // dst = c = a * b + c 2237 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2238 Assembler::vfmadd231ps(c, a, b, vector_len); 2239 if (dst != c) { 2240 vmovdqu(dst, c); 2241 } 2242 } 2243 2244 // dst = c = a * b + c 2245 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2246 Assembler::vfmadd231pd(c, a, b, vector_len); 2247 if (dst != c) { 2248 vmovdqu(dst, c); 2249 } 2250 } 2251 2252 // dst = c = a * b + c 2253 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2254 Assembler::vfmadd231ps(c, a, b, vector_len); 2255 if (dst != c) { 2256 vmovdqu(dst, c); 2257 } 2258 } 2259 2260 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2261 assert(rscratch != noreg || always_reachable(dst), "missing"); 2262 2263 if (reachable(dst)) { 2264 incrementl(as_Address(dst)); 2265 } else { 2266 lea(rscratch, dst); 2267 incrementl(Address(rscratch, 0)); 2268 } 2269 } 2270 2271 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2272 incrementl(as_Address(dst, rscratch)); 2273 } 2274 2275 void MacroAssembler::incrementl(Register reg, int value) { 2276 if (value == min_jint) 
{addl(reg, value) ; return; } 2277 if (value < 0) { decrementl(reg, -value); return; } 2278 if (value == 0) { ; return; } 2279 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2280 /* else */ { addl(reg, value) ; return; } 2281 } 2282 2283 void MacroAssembler::incrementl(Address dst, int value) { 2284 if (value == min_jint) {addl(dst, value) ; return; } 2285 if (value < 0) { decrementl(dst, -value); return; } 2286 if (value == 0) { ; return; } 2287 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2288 /* else */ { addl(dst, value) ; return; } 2289 } 2290 2291 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2292 assert(rscratch != noreg || always_reachable(dst), "missing"); 2293 2294 if (reachable(dst)) { 2295 jmp_literal(dst.target(), dst.rspec()); 2296 } else { 2297 lea(rscratch, dst); 2298 jmp(rscratch); 2299 } 2300 } 2301 2302 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2303 assert(rscratch != noreg || always_reachable(dst), "missing"); 2304 2305 if (reachable(dst)) { 2306 InstructionMark im(this); 2307 relocate(dst.reloc()); 2308 const int short_size = 2; 2309 const int long_size = 6; 2310 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2311 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2312 // 0111 tttn #8-bit disp 2313 emit_int8(0x70 | cc); 2314 emit_int8((offs - short_size) & 0xFF); 2315 } else { 2316 // 0000 1111 1000 tttn #32-bit disp 2317 emit_int8(0x0F); 2318 emit_int8((unsigned char)(0x80 | cc)); 2319 emit_int32(offs - long_size); 2320 } 2321 } else { 2322 #ifdef ASSERT 2323 warning("reversing conditional branch"); 2324 #endif /* ASSERT */ 2325 Label skip; 2326 jccb(reverse[cc], skip); 2327 lea(rscratch, dst); 2328 Assembler::jmp(rscratch); 2329 bind(skip); 2330 } 2331 } 2332 2333 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2334 assert(rscratch != noreg || always_reachable(src), "missing"); 2335 2336 if (reachable(src)) { 2337 Assembler::ldmxcsr(as_Address(src)); 2338 } else { 2339 lea(rscratch, src); 2340 Assembler::ldmxcsr(Address(rscratch, 0)); 2341 } 2342 } 2343 2344 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2345 int off; 2346 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2347 off = offset(); 2348 movsbl(dst, src); // movsxb 2349 } else { 2350 off = load_unsigned_byte(dst, src); 2351 shll(dst, 24); 2352 sarl(dst, 24); 2353 } 2354 return off; 2355 } 2356 2357 // Note: load_signed_short used to be called load_signed_word. 2358 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2359 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2360 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2361 int MacroAssembler::load_signed_short(Register dst, Address src) { 2362 int off; 2363 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2364 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2365 // version but this is what 64bit has always done. This seems to imply 2366 // that users are only using 32bits worth. 2367 off = offset(); 2368 movswl(dst, src); // movsxw 2369 } else { 2370 off = load_unsigned_short(dst, src); 2371 shll(dst, 16); 2372 sarl(dst, 16); 2373 } 2374 return off; 2375 } 2376 2377 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2378 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2379 // and "3.9 Partial Register Penalties", p. 22). 
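// The movzbl path is always taken on 64-bit; the xorl+movb fallback exists for
// pre-P6 32-bit processors, and the src.uses(dst) test keeps it from zeroing
// dst while dst is still needed to form the source address.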
2380 int off; 2381 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2382 off = offset(); 2383 movzbl(dst, src); // movzxb 2384 } else { 2385 xorl(dst, dst); 2386 off = offset(); 2387 movb(dst, src); 2388 } 2389 return off; 2390 } 2391 2392 // Note: load_unsigned_short used to be called load_unsigned_word. 2393 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2394 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2395 // and "3.9 Partial Register Penalties", p. 22). 2396 int off; 2397 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2398 off = offset(); 2399 movzwl(dst, src); // movzxw 2400 } else { 2401 xorl(dst, dst); 2402 off = offset(); 2403 movw(dst, src); 2404 } 2405 return off; 2406 } 2407 2408 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2409 switch (size_in_bytes) { 2410 #ifndef _LP64 2411 case 8: 2412 assert(dst2 != noreg, "second dest register required"); 2413 movl(dst, src); 2414 movl(dst2, src.plus_disp(BytesPerInt)); 2415 break; 2416 #else 2417 case 8: movq(dst, src); break; 2418 #endif 2419 case 4: movl(dst, src); break; 2420 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2421 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2422 default: ShouldNotReachHere(); 2423 } 2424 } 2425 2426 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2427 switch (size_in_bytes) { 2428 #ifndef _LP64 2429 case 8: 2430 assert(src2 != noreg, "second source register required"); 2431 movl(dst, src); 2432 movl(dst.plus_disp(BytesPerInt), src2); 2433 break; 2434 #else 2435 case 8: movq(dst, src); break; 2436 #endif 2437 case 4: movl(dst, src); break; 2438 case 2: movw(dst, src); break; 2439 case 1: movb(dst, src); break; 2440 default: ShouldNotReachHere(); 2441 } 2442 } 2443 2444 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2445 assert(rscratch != noreg || always_reachable(dst), "missing"); 2446 2447 if (reachable(dst)) { 2448 movl(as_Address(dst), src); 2449 } else { 2450 lea(rscratch, dst); 2451 movl(Address(rscratch, 0), src); 2452 } 2453 } 2454 2455 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2456 if (reachable(src)) { 2457 movl(dst, as_Address(src)); 2458 } else { 2459 lea(dst, src); 2460 movl(dst, Address(dst, 0)); 2461 } 2462 } 2463 2464 // C++ bool manipulation 2465 2466 void MacroAssembler::movbool(Register dst, Address src) { 2467 if(sizeof(bool) == 1) 2468 movb(dst, src); 2469 else if(sizeof(bool) == 2) 2470 movw(dst, src); 2471 else if(sizeof(bool) == 4) 2472 movl(dst, src); 2473 else 2474 // unsupported 2475 ShouldNotReachHere(); 2476 } 2477 2478 void MacroAssembler::movbool(Address dst, bool boolconst) { 2479 if(sizeof(bool) == 1) 2480 movb(dst, (int) boolconst); 2481 else if(sizeof(bool) == 2) 2482 movw(dst, (int) boolconst); 2483 else if(sizeof(bool) == 4) 2484 movl(dst, (int) boolconst); 2485 else 2486 // unsupported 2487 ShouldNotReachHere(); 2488 } 2489 2490 void MacroAssembler::movbool(Address dst, Register src) { 2491 if(sizeof(bool) == 1) 2492 movb(dst, src); 2493 else if(sizeof(bool) == 2) 2494 movw(dst, src); 2495 else if(sizeof(bool) == 4) 2496 movl(dst, src); 2497 else 2498 // unsupported 2499 ShouldNotReachHere(); 2500 } 2501 2502 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2503 assert(rscratch != noreg || 
always_reachable(src), "missing"); 2504 2505 if (reachable(src)) { 2506 movdl(dst, as_Address(src)); 2507 } else { 2508 lea(rscratch, src); 2509 movdl(dst, Address(rscratch, 0)); 2510 } 2511 } 2512 2513 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2514 assert(rscratch != noreg || always_reachable(src), "missing"); 2515 2516 if (reachable(src)) { 2517 movq(dst, as_Address(src)); 2518 } else { 2519 lea(rscratch, src); 2520 movq(dst, Address(rscratch, 0)); 2521 } 2522 } 2523 2524 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2525 assert(rscratch != noreg || always_reachable(src), "missing"); 2526 2527 if (reachable(src)) { 2528 if (UseXmmLoadAndClearUpper) { 2529 movsd (dst, as_Address(src)); 2530 } else { 2531 movlpd(dst, as_Address(src)); 2532 } 2533 } else { 2534 lea(rscratch, src); 2535 if (UseXmmLoadAndClearUpper) { 2536 movsd (dst, Address(rscratch, 0)); 2537 } else { 2538 movlpd(dst, Address(rscratch, 0)); 2539 } 2540 } 2541 } 2542 2543 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2544 assert(rscratch != noreg || always_reachable(src), "missing"); 2545 2546 if (reachable(src)) { 2547 movss(dst, as_Address(src)); 2548 } else { 2549 lea(rscratch, src); 2550 movss(dst, Address(rscratch, 0)); 2551 } 2552 } 2553 2554 void MacroAssembler::movptr(Register dst, Register src) { 2555 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2556 } 2557 2558 void MacroAssembler::movptr(Register dst, Address src) { 2559 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2560 } 2561 2562 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 2563 void MacroAssembler::movptr(Register dst, intptr_t src) { 2564 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src)); 2565 } 2566 2567 void MacroAssembler::movptr(Address dst, Register src) { 2568 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2569 } 2570 2571 void MacroAssembler::movptr(Address dst, int32_t src) { 2572 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2573 } 2574 2575 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2576 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2577 Assembler::movdqu(dst, src); 2578 } 2579 2580 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2581 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2582 Assembler::movdqu(dst, src); 2583 } 2584 2585 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2586 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2587 Assembler::movdqu(dst, src); 2588 } 2589 2590 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2591 assert(rscratch != noreg || always_reachable(src), "missing"); 2592 2593 if (reachable(src)) { 2594 movdqu(dst, as_Address(src)); 2595 } else { 2596 lea(rscratch, src); 2597 movdqu(dst, Address(rscratch, 0)); 2598 } 2599 } 2600 2601 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2602 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2603 Assembler::vmovdqu(dst, src); 2604 } 2605 2606 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2607 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2608 Assembler::vmovdqu(dst, src); 2609 } 2610 2611 void 
MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2612 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2613 Assembler::vmovdqu(dst, src); 2614 } 2615 2616 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2617 assert(rscratch != noreg || always_reachable(src), "missing"); 2618 2619 if (reachable(src)) { 2620 vmovdqu(dst, as_Address(src)); 2621 } 2622 else { 2623 lea(rscratch, src); 2624 vmovdqu(dst, Address(rscratch, 0)); 2625 } 2626 } 2627 2628 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2629 assert(rscratch != noreg || always_reachable(src), "missing"); 2630 2631 if (vector_len == AVX_512bit) { 2632 evmovdquq(dst, src, AVX_512bit, rscratch); 2633 } else if (vector_len == AVX_256bit) { 2634 vmovdqu(dst, src, rscratch); 2635 } else { 2636 movdqu(dst, src, rscratch); 2637 } 2638 } 2639 2640 void MacroAssembler::kmov(KRegister dst, Address src) { 2641 if (VM_Version::supports_avx512bw()) { 2642 kmovql(dst, src); 2643 } else { 2644 assert(VM_Version::supports_evex(), ""); 2645 kmovwl(dst, src); 2646 } 2647 } 2648 2649 void MacroAssembler::kmov(Address dst, KRegister src) { 2650 if (VM_Version::supports_avx512bw()) { 2651 kmovql(dst, src); 2652 } else { 2653 assert(VM_Version::supports_evex(), ""); 2654 kmovwl(dst, src); 2655 } 2656 } 2657 2658 void MacroAssembler::kmov(KRegister dst, KRegister src) { 2659 if (VM_Version::supports_avx512bw()) { 2660 kmovql(dst, src); 2661 } else { 2662 assert(VM_Version::supports_evex(), ""); 2663 kmovwl(dst, src); 2664 } 2665 } 2666 2667 void MacroAssembler::kmov(Register dst, KRegister src) { 2668 if (VM_Version::supports_avx512bw()) { 2669 kmovql(dst, src); 2670 } else { 2671 assert(VM_Version::supports_evex(), ""); 2672 kmovwl(dst, src); 2673 } 2674 } 2675 2676 void MacroAssembler::kmov(KRegister dst, Register src) { 2677 if (VM_Version::supports_avx512bw()) { 2678 kmovql(dst, src); 2679 } else { 2680 assert(VM_Version::supports_evex(), ""); 2681 kmovwl(dst, src); 2682 } 2683 } 2684 2685 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2686 assert(rscratch != noreg || always_reachable(src), "missing"); 2687 2688 if (reachable(src)) { 2689 kmovql(dst, as_Address(src)); 2690 } else { 2691 lea(rscratch, src); 2692 kmovql(dst, Address(rscratch, 0)); 2693 } 2694 } 2695 2696 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2697 assert(rscratch != noreg || always_reachable(src), "missing"); 2698 2699 if (reachable(src)) { 2700 kmovwl(dst, as_Address(src)); 2701 } else { 2702 lea(rscratch, src); 2703 kmovwl(dst, Address(rscratch, 0)); 2704 } 2705 } 2706 2707 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2708 int vector_len, Register rscratch) { 2709 assert(rscratch != noreg || always_reachable(src), "missing"); 2710 2711 if (reachable(src)) { 2712 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2713 } else { 2714 lea(rscratch, src); 2715 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2716 } 2717 } 2718 2719 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2720 int vector_len, Register rscratch) { 2721 assert(rscratch != noreg || always_reachable(src), "missing"); 2722 2723 if (reachable(src)) { 2724 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2725 } 
else { 2726 lea(rscratch, src); 2727 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2728 } 2729 } 2730 2731 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2732 assert(rscratch != noreg || always_reachable(src), "missing"); 2733 2734 if (reachable(src)) { 2735 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2736 } else { 2737 lea(rscratch, src); 2738 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2739 } 2740 } 2741 2742 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2743 assert(rscratch != noreg || always_reachable(src), "missing"); 2744 2745 if (reachable(src)) { 2746 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2747 } else { 2748 lea(rscratch, src); 2749 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2750 } 2751 } 2752 2753 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2754 assert(rscratch != noreg || always_reachable(src), "missing"); 2755 2756 if (reachable(src)) { 2757 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2758 } else { 2759 lea(rscratch, src); 2760 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2761 } 2762 } 2763 2764 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src, Register rscratch) { 2765 assert(rscratch != noreg || always_reachable(src), "missing"); 2766 2767 if (reachable(src)) { 2768 Assembler::movdqa(dst, as_Address(src)); 2769 } else { 2770 lea(rscratch, src); 2771 Assembler::movdqa(dst, Address(rscratch, 0)); 2772 } 2773 } 2774 2775 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2776 assert(rscratch != noreg || always_reachable(src), "missing"); 2777 2778 if (reachable(src)) { 2779 Assembler::movsd(dst, as_Address(src)); 2780 } else { 2781 lea(rscratch, src); 2782 Assembler::movsd(dst, Address(rscratch, 0)); 2783 } 2784 } 2785 2786 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2787 assert(rscratch != noreg || always_reachable(src), "missing"); 2788 2789 if (reachable(src)) { 2790 Assembler::movss(dst, as_Address(src)); 2791 } else { 2792 lea(rscratch, src); 2793 Assembler::movss(dst, Address(rscratch, 0)); 2794 } 2795 } 2796 2797 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2798 assert(rscratch != noreg || always_reachable(src), "missing"); 2799 2800 if (reachable(src)) { 2801 Assembler::movddup(dst, as_Address(src)); 2802 } else { 2803 lea(rscratch, src); 2804 Assembler::movddup(dst, Address(rscratch, 0)); 2805 } 2806 } 2807 2808 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2809 assert(rscratch != noreg || always_reachable(src), "missing"); 2810 2811 if (reachable(src)) { 2812 Assembler::vmovddup(dst, as_Address(src), vector_len); 2813 } else { 2814 lea(rscratch, src); 2815 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2816 } 2817 } 2818 2819 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2820 assert(rscratch != noreg || always_reachable(src), "missing"); 2821 2822 if (reachable(src)) { 2823 Assembler::mulsd(dst, as_Address(src)); 2824 } else { 2825 lea(rscratch, src); 2826 Assembler::mulsd(dst, Address(rscratch, 0)); 2827 } 2828 } 2829 2830 void 
MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2831 assert(rscratch != noreg || always_reachable(src), "missing"); 2832 2833 if (reachable(src)) { 2834 Assembler::mulss(dst, as_Address(src)); 2835 } else { 2836 lea(rscratch, src); 2837 Assembler::mulss(dst, Address(rscratch, 0)); 2838 } 2839 } 2840 2841 void MacroAssembler::null_check(Register reg, int offset) { 2842 if (needs_explicit_null_check(offset)) { 2843 // provoke OS null exception if reg is null by 2844 // accessing M[reg] w/o changing any (non-CC) registers 2845 // NOTE: cmpl is plenty here to provoke a segv 2846 cmpptr(rax, Address(reg, 0)); 2847 // Note: should probably use testl(rax, Address(reg, 0)); 2848 // may be shorter code (however, this version of 2849 // testl needs to be implemented first) 2850 } else { 2851 // nothing to do, (later) access of M[reg + offset] 2852 // will provoke OS null exception if reg is null 2853 } 2854 } 2855 2856 void MacroAssembler::os_breakpoint() { 2857 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 2858 // (e.g., MSVC can't call ps() otherwise) 2859 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 2860 } 2861 2862 void MacroAssembler::unimplemented(const char* what) { 2863 const char* buf = nullptr; 2864 { 2865 ResourceMark rm; 2866 stringStream ss; 2867 ss.print("unimplemented: %s", what); 2868 buf = code_string(ss.as_string()); 2869 } 2870 stop(buf); 2871 } 2872 2873 #ifdef _LP64 2874 #define XSTATE_BV 0x200 2875 #endif 2876 2877 void MacroAssembler::pop_CPU_state() { 2878 pop_FPU_state(); 2879 pop_IU_state(); 2880 } 2881 2882 void MacroAssembler::pop_FPU_state() { 2883 #ifndef _LP64 2884 frstor(Address(rsp, 0)); 2885 #else 2886 fxrstor(Address(rsp, 0)); 2887 #endif 2888 addptr(rsp, FPUStateSizeInWords * wordSize); 2889 } 2890 2891 void MacroAssembler::pop_IU_state() { 2892 popa(); 2893 LP64_ONLY(addq(rsp, 8)); 2894 popf(); 2895 } 2896 2897 // Save Integer and Float state 2898 // Warning: Stack must be 16 byte aligned (64bit) 2899 void MacroAssembler::push_CPU_state() { 2900 push_IU_state(); 2901 push_FPU_state(); 2902 } 2903 2904 void MacroAssembler::push_FPU_state() { 2905 subptr(rsp, FPUStateSizeInWords * wordSize); 2906 #ifndef _LP64 2907 fnsave(Address(rsp, 0)); 2908 fwait(); 2909 #else 2910 fxsave(Address(rsp, 0)); 2911 #endif // LP64 2912 } 2913 2914 void MacroAssembler::push_IU_state() { 2915 // Push flags first because pusha kills them 2916 pushf(); 2917 // Make sure rsp stays 16-byte aligned 2918 LP64_ONLY(subq(rsp, 8)); 2919 pusha(); 2920 } 2921 2922 void MacroAssembler::push_cont_fastpath() { 2923 if (!Continuations::enabled()) return; 2924 2925 #ifndef _LP64 2926 Register rthread = rax; 2927 Register rrealsp = rbx; 2928 push(rthread); 2929 push(rrealsp); 2930 2931 get_thread(rthread); 2932 2933 // The code below wants the original RSP. 2934 // Move it back after the pushes above. 
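// (Two words were pushed above, hence the 2*wordSize adjustment below.)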
2935 movptr(rrealsp, rsp); 2936 addptr(rrealsp, 2*wordSize); 2937 #else 2938 Register rthread = r15_thread; 2939 Register rrealsp = rsp; 2940 #endif 2941 2942 Label done; 2943 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 2944 jccb(Assembler::belowEqual, done); 2945 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 2946 bind(done); 2947 2948 #ifndef _LP64 2949 pop(rrealsp); 2950 pop(rthread); 2951 #endif 2952 } 2953 2954 void MacroAssembler::pop_cont_fastpath() { 2955 if (!Continuations::enabled()) return; 2956 2957 #ifndef _LP64 2958 Register rthread = rax; 2959 Register rrealsp = rbx; 2960 push(rthread); 2961 push(rrealsp); 2962 2963 get_thread(rthread); 2964 2965 // The code below wants the original RSP. 2966 // Move it back after the pushes above. 2967 movptr(rrealsp, rsp); 2968 addptr(rrealsp, 2*wordSize); 2969 #else 2970 Register rthread = r15_thread; 2971 Register rrealsp = rsp; 2972 #endif 2973 2974 Label done; 2975 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 2976 jccb(Assembler::below, done); 2977 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 2978 bind(done); 2979 2980 #ifndef _LP64 2981 pop(rrealsp); 2982 pop(rthread); 2983 #endif 2984 } 2985 2986 void MacroAssembler::inc_held_monitor_count() { 2987 #ifndef _LP64 2988 Register thread = rax; 2989 push(thread); 2990 get_thread(thread); 2991 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 2992 pop(thread); 2993 #else // LP64 2994 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 2995 #endif 2996 } 2997 2998 void MacroAssembler::dec_held_monitor_count() { 2999 #ifndef _LP64 3000 Register thread = rax; 3001 push(thread); 3002 get_thread(thread); 3003 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3004 pop(thread); 3005 #else // LP64 3006 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3007 #endif 3008 } 3009 3010 #ifdef ASSERT 3011 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3012 #ifdef _LP64 3013 Label no_cont; 3014 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3015 testl(cont, cont); 3016 jcc(Assembler::zero, no_cont); 3017 stop(name); 3018 bind(no_cont); 3019 #else 3020 Unimplemented(); 3021 #endif 3022 } 3023 #endif 3024 3025 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3026 if (!java_thread->is_valid()) { 3027 java_thread = rdi; 3028 get_thread(java_thread); 3029 } 3030 // we must set sp to zero to clear frame 3031 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3032 // must clear fp, so that compiled frames are not confused; it is 3033 // possible that we need it only for debugging 3034 if (clear_fp) { 3035 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3036 } 3037 // Always clear the pc because it could have been set by make_walkable() 3038 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3039 vzeroupper(); 3040 } 3041 3042 void MacroAssembler::restore_rax(Register tmp) { 3043 if (tmp == noreg) pop(rax); 3044 else if (tmp != rax) mov(rax, tmp); 3045 } 3046 3047 void MacroAssembler::round_to(Register reg, int modulus) { 3048 addptr(reg, modulus - 1); 3049 andptr(reg, -modulus); 3050 } 3051 3052 void MacroAssembler::save_rax(Register tmp) { 3053 if (tmp == noreg) push(rax); 3054 else if (tmp != rax) mov(tmp, rax); 3055 } 3056 3057 void 
MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3058 if (at_return) { 3059 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3060 // we may safely use rsp instead to perform the stack watermark check. 3061 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3062 jcc(Assembler::above, slow_path); 3063 return; 3064 } 3065 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3066 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3067 } 3068 3069 // Calls to C land 3070 // 3071 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3072 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 3073 // has to be reset to 0. This is required to allow proper stack traversal. 3074 void MacroAssembler::set_last_Java_frame(Register java_thread, 3075 Register last_java_sp, 3076 Register last_java_fp, 3077 address last_java_pc, 3078 Register rscratch) { 3079 vzeroupper(); 3080 // determine java_thread register 3081 if (!java_thread->is_valid()) { 3082 java_thread = rdi; 3083 get_thread(java_thread); 3084 } 3085 // determine last_java_sp register 3086 if (!last_java_sp->is_valid()) { 3087 last_java_sp = rsp; 3088 } 3089 // last_java_fp is optional 3090 if (last_java_fp->is_valid()) { 3091 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3092 } 3093 // last_java_pc is optional 3094 if (last_java_pc != nullptr) { 3095 Address java_pc(java_thread, 3096 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3097 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3098 } 3099 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3100 } 3101 3102 void MacroAssembler::shlptr(Register dst, int imm8) { 3103 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3104 } 3105 3106 void MacroAssembler::shrptr(Register dst, int imm8) { 3107 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3108 } 3109 3110 void MacroAssembler::sign_extend_byte(Register reg) { 3111 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3112 movsbl(reg, reg); // movsxb 3113 } else { 3114 shll(reg, 24); 3115 sarl(reg, 24); 3116 } 3117 } 3118 3119 void MacroAssembler::sign_extend_short(Register reg) { 3120 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3121 movswl(reg, reg); // movsxw 3122 } else { 3123 shll(reg, 16); 3124 sarl(reg, 16); 3125 } 3126 } 3127 3128 void MacroAssembler::testl(Address dst, int32_t imm32) { 3129 if (imm32 >= 0 && is8bit(imm32)) { 3130 testb(dst, imm32); 3131 } else { 3132 Assembler::testl(dst, imm32); 3133 } 3134 } 3135 3136 void MacroAssembler::testl(Register dst, int32_t imm32) { 3137 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3138 testb(dst, imm32); 3139 } else { 3140 Assembler::testl(dst, imm32); 3141 } 3142 } 3143 3144 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3145 assert(always_reachable(src), "Address should be reachable"); 3146 testl(dst, as_Address(src)); 3147 } 3148 3149 #ifdef _LP64 3150 3151 void MacroAssembler::testq(Address dst, int32_t imm32) { 3152 if (imm32 >= 0) { 3153 testl(dst, imm32); 3154 } else { 3155 Assembler::testq(dst, imm32); 3156 } 3157 } 3158 3159 void MacroAssembler::testq(Register dst, int32_t imm32) { 3160 if (imm32 >= 0) { 3161 testl(dst, imm32); 3162 } else { 3163 
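// A negative imm32 sign-extends into the upper 32 bits, so the full
// 64-bit test is required here.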
Assembler::testq(dst, imm32); 3164 } 3165 } 3166 3167 #endif 3168 3169 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3170 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3171 Assembler::pcmpeqb(dst, src); 3172 } 3173 3174 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3175 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3176 Assembler::pcmpeqw(dst, src); 3177 } 3178 3179 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3180 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3181 Assembler::pcmpestri(dst, src, imm8); 3182 } 3183 3184 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3185 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3186 Assembler::pcmpestri(dst, src, imm8); 3187 } 3188 3189 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3190 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3191 Assembler::pmovzxbw(dst, src); 3192 } 3193 3194 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3195 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3196 Assembler::pmovzxbw(dst, src); 3197 } 3198 3199 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3200 assert((src->encoding() < 16),"XMM register should be 0-15"); 3201 Assembler::pmovmskb(dst, src); 3202 } 3203 3204 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3205 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3206 Assembler::ptest(dst, src); 3207 } 3208 3209 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3210 assert(rscratch != noreg || always_reachable(src), "missing"); 3211 3212 if (reachable(src)) { 3213 Assembler::sqrtss(dst, as_Address(src)); 3214 } else { 3215 lea(rscratch, src); 3216 Assembler::sqrtss(dst, Address(rscratch, 0)); 3217 } 3218 } 3219 3220 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3221 assert(rscratch != noreg || always_reachable(src), "missing"); 3222 3223 if (reachable(src)) { 3224 Assembler::subsd(dst, as_Address(src)); 3225 } else { 3226 lea(rscratch, src); 3227 Assembler::subsd(dst, Address(rscratch, 0)); 3228 } 3229 } 3230 3231 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3232 assert(rscratch != noreg || always_reachable(src), "missing"); 3233 3234 if (reachable(src)) { 3235 Assembler::roundsd(dst, as_Address(src), rmode); 3236 } else { 3237 lea(rscratch, src); 3238 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3239 } 3240 } 3241 3242 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3243 assert(rscratch != noreg || always_reachable(src), "missing"); 3244 3245 if (reachable(src)) { 3246 Assembler::subss(dst, as_Address(src)); 3247 } else { 3248 lea(rscratch, src); 3249 Assembler::subss(dst, Address(rscratch, 0)); 3250 } 3251 } 3252 3253 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3254 assert(rscratch != noreg || always_reachable(src), "missing"); 3255 3256 if (reachable(src)) { 3257 Assembler::ucomisd(dst, as_Address(src)); 3258 } else { 3259 lea(rscratch, src); 3260 
Assembler::ucomisd(dst, Address(rscratch, 0)); 3261 } 3262 } 3263 3264 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3265 assert(rscratch != noreg || always_reachable(src), "missing"); 3266 3267 if (reachable(src)) { 3268 Assembler::ucomiss(dst, as_Address(src)); 3269 } else { 3270 lea(rscratch, src); 3271 Assembler::ucomiss(dst, Address(rscratch, 0)); 3272 } 3273 } 3274 3275 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3276 assert(rscratch != noreg || always_reachable(src), "missing"); 3277 3278 // Used in sign-bit flipping with aligned address. 3279 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3280 if (reachable(src)) { 3281 Assembler::xorpd(dst, as_Address(src)); 3282 } else { 3283 lea(rscratch, src); 3284 Assembler::xorpd(dst, Address(rscratch, 0)); 3285 } 3286 } 3287 3288 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3289 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3290 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3291 } 3292 else { 3293 Assembler::xorpd(dst, src); 3294 } 3295 } 3296 3297 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3298 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3299 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3300 } else { 3301 Assembler::xorps(dst, src); 3302 } 3303 } 3304 3305 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3306 assert(rscratch != noreg || always_reachable(src), "missing"); 3307 3308 // Used in sign-bit flipping with aligned address. 3309 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3310 if (reachable(src)) { 3311 Assembler::xorps(dst, as_Address(src)); 3312 } else { 3313 lea(rscratch, src); 3314 Assembler::xorps(dst, Address(rscratch, 0)); 3315 } 3316 } 3317 3318 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3319 assert(rscratch != noreg || always_reachable(src), "missing"); 3320 3321 // Used in sign-bit flipping with aligned address. 
3322 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3323 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3324 if (reachable(src)) { 3325 Assembler::pshufb(dst, as_Address(src)); 3326 } else { 3327 lea(rscratch, src); 3328 Assembler::pshufb(dst, Address(rscratch, 0)); 3329 } 3330 } 3331 3332 // AVX 3-operands instructions 3333 3334 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3335 assert(rscratch != noreg || always_reachable(src), "missing"); 3336 3337 if (reachable(src)) { 3338 vaddsd(dst, nds, as_Address(src)); 3339 } else { 3340 lea(rscratch, src); 3341 vaddsd(dst, nds, Address(rscratch, 0)); 3342 } 3343 } 3344 3345 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3346 assert(rscratch != noreg || always_reachable(src), "missing"); 3347 3348 if (reachable(src)) { 3349 vaddss(dst, nds, as_Address(src)); 3350 } else { 3351 lea(rscratch, src); 3352 vaddss(dst, nds, Address(rscratch, 0)); 3353 } 3354 } 3355 3356 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3357 assert(UseAVX > 0, "requires some form of AVX"); 3358 assert(rscratch != noreg || always_reachable(src), "missing"); 3359 3360 if (reachable(src)) { 3361 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3362 } else { 3363 lea(rscratch, src); 3364 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3365 } 3366 } 3367 3368 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3369 assert(UseAVX > 0, "requires some form of AVX"); 3370 assert(rscratch != noreg || always_reachable(src), "missing"); 3371 3372 if (reachable(src)) { 3373 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3374 } else { 3375 lea(rscratch, src); 3376 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3377 } 3378 } 3379 3380 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3381 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3382 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3383 3384 vandps(dst, nds, negate_field, vector_len, rscratch); 3385 } 3386 3387 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3388 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3389 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3390 3391 vandpd(dst, nds, negate_field, vector_len, rscratch); 3392 } 3393 3394 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3395 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3396 Assembler::vpaddb(dst, nds, src, vector_len); 3397 } 3398 3399 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3400 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3401 Assembler::vpaddb(dst, nds, src, vector_len); 3402 } 3403 3404 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3405 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3406 Assembler::vpaddw(dst, nds, src, vector_len); 3407 } 3408 3409 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3410 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3411 Assembler::vpaddw(dst, nds, src, vector_len); 3412 } 3413 3414 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3415 assert(rscratch != noreg || always_reachable(src), "missing"); 3416 3417 if (reachable(src)) { 3418 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3419 } else { 3420 lea(rscratch, src); 3421 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3422 } 3423 } 3424 3425 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3426 assert(rscratch != noreg || always_reachable(src), "missing"); 3427 3428 if (reachable(src)) { 3429 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3430 } else { 3431 lea(rscratch, src); 3432 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3433 } 3434 } 3435 3436 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3437 assert(rscratch != noreg || always_reachable(src), "missing"); 3438 3439 if (reachable(src)) { 3440 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3441 } else { 3442 lea(rscratch, src); 3443 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3444 } 3445 } 3446 3447 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3448 assert(rscratch != noreg || always_reachable(src), "missing"); 3449 3450 if (reachable(src)) { 3451 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 3452 } else { 3453 lea(rscratch, src); 3454 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 3455 } 3456 } 3457 3458 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3459 assert(rscratch != noreg || always_reachable(src), "missing"); 3460 3461 if (reachable(src)) { 3462 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 3463 } else { 3464 lea(rscratch, src); 3465 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 3466 } 3467 } 3468 3469 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3470 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3471 Assembler::vpcmpeqb(dst, nds, src, vector_len); 3472 } 3473 3474 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3475 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3476 Assembler::vpcmpeqw(dst, nds, src, vector_len); 3477 } 3478 3479 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3480 assert(rscratch != noreg || always_reachable(src), "missing"); 3481 3482 if (reachable(src)) { 3483 Assembler::evpcmpeqd(kdst, mask, 
nds, as_Address(src), vector_len); 3484 } else { 3485 lea(rscratch, src); 3486 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 3487 } 3488 } 3489 3490 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3491 int comparison, bool is_signed, int vector_len, Register rscratch) { 3492 assert(rscratch != noreg || always_reachable(src), "missing"); 3493 3494 if (reachable(src)) { 3495 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3496 } else { 3497 lea(rscratch, src); 3498 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3499 } 3500 } 3501 3502 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3503 int comparison, bool is_signed, int vector_len, Register rscratch) { 3504 assert(rscratch != noreg || always_reachable(src), "missing"); 3505 3506 if (reachable(src)) { 3507 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3508 } else { 3509 lea(rscratch, src); 3510 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3511 } 3512 } 3513 3514 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3515 int comparison, bool is_signed, int vector_len, Register rscratch) { 3516 assert(rscratch != noreg || always_reachable(src), "missing"); 3517 3518 if (reachable(src)) { 3519 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3520 } else { 3521 lea(rscratch, src); 3522 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3523 } 3524 } 3525 3526 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3527 int comparison, bool is_signed, int vector_len, Register rscratch) { 3528 assert(rscratch != noreg || always_reachable(src), "missing"); 3529 3530 if (reachable(src)) { 3531 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3532 } else { 3533 lea(rscratch, src); 3534 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3535 } 3536 } 3537 3538 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3539 if (width == Assembler::Q) { 3540 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3541 } else { 3542 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3543 } 3544 } 3545 3546 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3547 int eq_cond_enc = 0x29; 3548 int gt_cond_enc = 0x37; 3549 if (width != Assembler::Q) { 3550 eq_cond_enc = 0x74 + width; 3551 gt_cond_enc = 0x64 + width; 3552 } 3553 switch (cond) { 3554 case eq: 3555 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3556 break; 3557 case neq: 3558 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3559 vallones(xtmp, vector_len); 3560 vpxor(dst, xtmp, dst, vector_len); 3561 break; 3562 case le: 3563 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3564 vallones(xtmp, vector_len); 3565 vpxor(dst, xtmp, dst, vector_len); 3566 break; 3567 case nlt: 3568 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3569 vallones(xtmp, vector_len); 3570 vpxor(dst, xtmp, dst, vector_len); 3571 break; 3572 case lt: 
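// lt is computed as a greater-than compare with the operands swapped.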
3573 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3574 break; 3575 case nle: 3576 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3577 break; 3578 default: 3579 assert(false, "Should not reach here"); 3580 } 3581 } 3582 3583 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3584 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3585 Assembler::vpmovzxbw(dst, src, vector_len); 3586 } 3587 3588 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3589 assert((src->encoding() < 16),"XMM register should be 0-15"); 3590 Assembler::vpmovmskb(dst, src, vector_len); 3591 } 3592 3593 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3594 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3595 Assembler::vpmullw(dst, nds, src, vector_len); 3596 } 3597 3598 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3599 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3600 Assembler::vpmullw(dst, nds, src, vector_len); 3601 } 3602 3603 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3604 assert((UseAVX > 0), "AVX support is needed"); 3605 assert(rscratch != noreg || always_reachable(src), "missing"); 3606 3607 if (reachable(src)) { 3608 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3609 } else { 3610 lea(rscratch, src); 3611 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3612 } 3613 } 3614 3615 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3616 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3617 Assembler::vpsubb(dst, nds, src, vector_len); 3618 } 3619 3620 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3621 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3622 Assembler::vpsubb(dst, nds, src, vector_len); 3623 } 3624 3625 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3626 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3627 Assembler::vpsubw(dst, nds, src, vector_len); 3628 } 3629 3630 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3631 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3632 Assembler::vpsubw(dst, nds, src, vector_len); 3633 } 3634 3635 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3636 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3637 Assembler::vpsraw(dst, nds, shift, vector_len); 3638 } 3639 3640 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3641 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 
0-15"); 3642 Assembler::vpsraw(dst, nds, shift, vector_len); 3643 } 3644 3645 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3646 assert(UseAVX > 2,""); 3647 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3648 vector_len = 2; 3649 } 3650 Assembler::evpsraq(dst, nds, shift, vector_len); 3651 } 3652 3653 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3654 assert(UseAVX > 2,""); 3655 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3656 vector_len = 2; 3657 } 3658 Assembler::evpsraq(dst, nds, shift, vector_len); 3659 } 3660 3661 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3662 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3663 Assembler::vpsrlw(dst, nds, shift, vector_len); 3664 } 3665 3666 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3667 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3668 Assembler::vpsrlw(dst, nds, shift, vector_len); 3669 } 3670 3671 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3672 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3673 Assembler::vpsllw(dst, nds, shift, vector_len); 3674 } 3675 3676 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3677 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3678 Assembler::vpsllw(dst, nds, shift, vector_len); 3679 } 3680 3681 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3682 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3683 Assembler::vptest(dst, src); 3684 } 3685 3686 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3687 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3688 Assembler::punpcklbw(dst, src); 3689 } 3690 3691 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3692 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3693 Assembler::pshufd(dst, src, mode); 3694 } 3695 3696 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3697 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3698 Assembler::pshuflw(dst, src, mode); 3699 } 3700 3701 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3702 assert(rscratch != noreg || always_reachable(src), "missing"); 3703 3704 if (reachable(src)) { 3705 vandpd(dst, nds, as_Address(src), vector_len); 3706 } else { 3707 lea(rscratch, src); 3708 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3709 } 3710 } 3711 3712 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3713 assert(rscratch != noreg || always_reachable(src), "missing"); 3714 3715 if (reachable(src)) { 3716 vandps(dst, nds, as_Address(src), vector_len); 3717 } else { 3718 lea(rscratch, src); 3719 vandps(dst, 
nds, Address(rscratch, 0), vector_len); 3720 } 3721 } 3722 3723 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3724 bool merge, int vector_len, Register rscratch) { 3725 assert(rscratch != noreg || always_reachable(src), "missing"); 3726 3727 if (reachable(src)) { 3728 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3729 } else { 3730 lea(rscratch, src); 3731 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3732 } 3733 } 3734 3735 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3736 assert(rscratch != noreg || always_reachable(src), "missing"); 3737 3738 if (reachable(src)) { 3739 vdivsd(dst, nds, as_Address(src)); 3740 } else { 3741 lea(rscratch, src); 3742 vdivsd(dst, nds, Address(rscratch, 0)); 3743 } 3744 } 3745 3746 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3747 assert(rscratch != noreg || always_reachable(src), "missing"); 3748 3749 if (reachable(src)) { 3750 vdivss(dst, nds, as_Address(src)); 3751 } else { 3752 lea(rscratch, src); 3753 vdivss(dst, nds, Address(rscratch, 0)); 3754 } 3755 } 3756 3757 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3758 assert(rscratch != noreg || always_reachable(src), "missing"); 3759 3760 if (reachable(src)) { 3761 vmulsd(dst, nds, as_Address(src)); 3762 } else { 3763 lea(rscratch, src); 3764 vmulsd(dst, nds, Address(rscratch, 0)); 3765 } 3766 } 3767 3768 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3769 assert(rscratch != noreg || always_reachable(src), "missing"); 3770 3771 if (reachable(src)) { 3772 vmulss(dst, nds, as_Address(src)); 3773 } else { 3774 lea(rscratch, src); 3775 vmulss(dst, nds, Address(rscratch, 0)); 3776 } 3777 } 3778 3779 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3780 assert(rscratch != noreg || always_reachable(src), "missing"); 3781 3782 if (reachable(src)) { 3783 vsubsd(dst, nds, as_Address(src)); 3784 } else { 3785 lea(rscratch, src); 3786 vsubsd(dst, nds, Address(rscratch, 0)); 3787 } 3788 } 3789 3790 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3791 assert(rscratch != noreg || always_reachable(src), "missing"); 3792 3793 if (reachable(src)) { 3794 vsubss(dst, nds, as_Address(src)); 3795 } else { 3796 lea(rscratch, src); 3797 vsubss(dst, nds, Address(rscratch, 0)); 3798 } 3799 } 3800 3801 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3802 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3803 assert(rscratch != noreg || always_reachable(src), "missing"); 3804 3805 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 3806 } 3807 3808 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3809 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3810 assert(rscratch != noreg || always_reachable(src), "missing"); 3811 3812 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 3813 } 3814 3815 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3816 
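  // Note: same AddressLiteral pattern as the wrappers above -- use the operand
  // in place when it is reachable from the generated code, otherwise
  // materialize its address into rscratch with lea() first; the assert below
  // checks that a scratch register was supplied whenever the operand might not
  // be reachable (always_reachable()).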
assert(rscratch != noreg || always_reachable(src), "missing"); 3817 3818 if (reachable(src)) { 3819 vxorpd(dst, nds, as_Address(src), vector_len); 3820 } else { 3821 lea(rscratch, src); 3822 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 3823 } 3824 } 3825 3826 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3827 assert(rscratch != noreg || always_reachable(src), "missing"); 3828 3829 if (reachable(src)) { 3830 vxorps(dst, nds, as_Address(src), vector_len); 3831 } else { 3832 lea(rscratch, src); 3833 vxorps(dst, nds, Address(rscratch, 0), vector_len); 3834 } 3835 } 3836 3837 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3838 assert(rscratch != noreg || always_reachable(src), "missing"); 3839 3840 if (UseAVX > 1 || (vector_len < 1)) { 3841 if (reachable(src)) { 3842 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 3843 } else { 3844 lea(rscratch, src); 3845 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 3846 } 3847 } else { 3848 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 3849 } 3850 } 3851 3852 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3853 assert(rscratch != noreg || always_reachable(src), "missing"); 3854 3855 if (reachable(src)) { 3856 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 3857 } else { 3858 lea(rscratch, src); 3859 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 3860 } 3861 } 3862 3863 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 3864 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 3865 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 3866 // The inverted mask is sign-extended 3867 andptr(possibly_non_local, inverted_mask); 3868 } 3869 3870 void MacroAssembler::resolve_jobject(Register value, 3871 Register thread, 3872 Register tmp) { 3873 assert_different_registers(value, thread, tmp); 3874 Label done, tagged, weak_tagged; 3875 testptr(value, value); 3876 jcc(Assembler::zero, done); // Use null as-is. 3877 testptr(value, JNIHandles::tag_mask); // Test for tag. 3878 jcc(Assembler::notZero, tagged); 3879 3880 // Resolve local handle 3881 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread); 3882 verify_oop(value); 3883 jmp(done); 3884 3885 bind(tagged); 3886 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 3887 jcc(Assembler::notZero, weak_tagged); 3888 3889 // Resolve global handle 3890 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 3891 verify_oop(value); 3892 jmp(done); 3893 3894 bind(weak_tagged); 3895 // Resolve jweak. 3896 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3897 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread); 3898 verify_oop(value); 3899 3900 bind(done); 3901 } 3902 3903 void MacroAssembler::resolve_global_jobject(Register value, 3904 Register thread, 3905 Register tmp) { 3906 assert_different_registers(value, thread, tmp); 3907 Label done; 3908 3909 testptr(value, value); 3910 jcc(Assembler::zero, done); // Use null as-is. 3911 3912 #ifdef ASSERT 3913 { 3914 Label valid_global_tag; 3915 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
3916 jcc(Assembler::notZero, valid_global_tag); 3917 stop("non global jobject using resolve_global_jobject"); 3918 bind(valid_global_tag); 3919 } 3920 #endif 3921 3922 // Resolve global handle 3923 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 3924 verify_oop(value); 3925 3926 bind(done); 3927 } 3928 3929 void MacroAssembler::subptr(Register dst, int32_t imm32) { 3930 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); 3931 } 3932 3933 // Force generation of a 4 byte immediate value even if it fits into 8bit 3934 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 3935 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32)); 3936 } 3937 3938 void MacroAssembler::subptr(Register dst, Register src) { 3939 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); 3940 } 3941 3942 // C++ bool manipulation 3943 void MacroAssembler::testbool(Register dst) { 3944 if(sizeof(bool) == 1) 3945 testb(dst, 0xff); 3946 else if(sizeof(bool) == 2) { 3947 // testw implementation needed for two byte bools 3948 ShouldNotReachHere(); 3949 } else if(sizeof(bool) == 4) 3950 testl(dst, dst); 3951 else 3952 // unsupported 3953 ShouldNotReachHere(); 3954 } 3955 3956 void MacroAssembler::testptr(Register dst, Register src) { 3957 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); 3958 } 3959 3960 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 3961 void MacroAssembler::tlab_allocate(Register thread, Register obj, 3962 Register var_size_in_bytes, 3963 int con_size_in_bytes, 3964 Register t1, 3965 Register t2, 3966 Label& slow_case) { 3967 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3968 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 3969 } 3970 3971 RegSet MacroAssembler::call_clobbered_gp_registers() { 3972 RegSet regs; 3973 #ifdef _LP64 3974 regs += RegSet::of(rax, rcx, rdx); 3975 #ifndef WINDOWS 3976 regs += RegSet::of(rsi, rdi); 3977 #endif 3978 regs += RegSet::range(r8, r11); 3979 #else 3980 regs += RegSet::of(rax, rcx, rdx); 3981 #endif 3982 return regs; 3983 } 3984 3985 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 3986 int num_xmm_registers = XMMRegister::available_xmm_registers(); 3987 #if defined(WINDOWS) && defined(_LP64) 3988 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 3989 if (num_xmm_registers > 16) { 3990 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 3991 } 3992 return result; 3993 #else 3994 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 3995 #endif 3996 } 3997 3998 static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor 3999 4000 #ifndef _LP64 4001 static bool use_x87_registers() { return UseSSE < 2; } 4002 #endif 4003 static bool use_xmm_registers() { return UseSSE >= 1; } 4004 4005 // C1 only ever uses the first double/float of the XMM register. 4006 static int xmm_save_size() { return UseSSE >= 2 ? 
sizeof(double) : sizeof(float); } 4007 4008 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4009 if (UseSSE == 1) { 4010 masm->movflt(Address(rsp, offset), reg); 4011 } else { 4012 masm->movdbl(Address(rsp, offset), reg); 4013 } 4014 } 4015 4016 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4017 if (UseSSE == 1) { 4018 masm->movflt(reg, Address(rsp, offset)); 4019 } else { 4020 masm->movdbl(reg, Address(rsp, offset)); 4021 } 4022 } 4023 4024 int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu, 4025 int& gp_area_size, int& fp_area_size, int& xmm_area_size) { 4026 4027 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 4028 StackAlignmentInBytes); 4029 #ifdef _LP64 4030 fp_area_size = 0; 4031 #else 4032 fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0; 4033 #endif 4034 xmm_area_size = (save_fpu && use_xmm_registers()) ? xmm_registers.size() * xmm_save_size() : 0; 4035 4036 return gp_area_size + fp_area_size + xmm_area_size; 4037 } 4038 4039 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 4040 block_comment("push_call_clobbered_registers start"); 4041 // Regular registers 4042 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 4043 4044 int gp_area_size; 4045 int fp_area_size; 4046 int xmm_area_size; 4047 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 4048 gp_area_size, fp_area_size, xmm_area_size); 4049 subptr(rsp, total_save_size); 4050 4051 push_set(gp_registers_to_push, 0); 4052 4053 #ifndef _LP64 4054 if (save_fpu && use_x87_registers()) { 4055 fnsave(Address(rsp, gp_area_size)); 4056 fwait(); 4057 } 4058 #endif 4059 if (save_fpu && use_xmm_registers()) { 4060 push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4061 } 4062 4063 block_comment("push_call_clobbered_registers end"); 4064 } 4065 4066 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 4067 block_comment("pop_call_clobbered_registers start"); 4068 4069 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 4070 4071 int gp_area_size; 4072 int fp_area_size; 4073 int xmm_area_size; 4074 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 4075 gp_area_size, fp_area_size, xmm_area_size); 4076 4077 if (restore_fpu && use_xmm_registers()) { 4078 pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4079 } 4080 #ifndef _LP64 4081 if (restore_fpu && use_x87_registers()) { 4082 frstor(Address(rsp, gp_area_size)); 4083 } 4084 #endif 4085 4086 pop_set(gp_registers_to_pop, 0); 4087 4088 addptr(rsp, total_save_size); 4089 4090 vzeroupper(); 4091 4092 block_comment("pop_call_clobbered_registers end"); 4093 } 4094 4095 void MacroAssembler::push_set(XMMRegSet set, int offset) { 4096 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 4097 int spill_offset = offset; 4098 4099 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 4100 save_xmm_register(this, spill_offset, *it); 4101 spill_offset += xmm_save_size(); 4102 } 4103 } 4104 4105 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 4106 int restore_size = set.size() * xmm_save_size(); 4107 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 4108 4109 int 
restore_offset = offset + restore_size - xmm_save_size(); 4110 4111 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 4112 restore_xmm_register(this, restore_offset, *it); 4113 restore_offset -= xmm_save_size(); 4114 } 4115 } 4116 4117 void MacroAssembler::push_set(RegSet set, int offset) { 4118 int spill_offset; 4119 if (offset == -1) { 4120 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4121 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4122 subptr(rsp, aligned_size); 4123 spill_offset = 0; 4124 } else { 4125 spill_offset = offset; 4126 } 4127 4128 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 4129 movptr(Address(rsp, spill_offset), *it); 4130 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4131 } 4132 } 4133 4134 void MacroAssembler::pop_set(RegSet set, int offset) { 4135 4136 int gp_reg_size = Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4137 int restore_size = set.size() * gp_reg_size; 4138 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 4139 4140 int restore_offset; 4141 if (offset == -1) { 4142 restore_offset = restore_size - gp_reg_size; 4143 } else { 4144 restore_offset = offset + restore_size - gp_reg_size; 4145 } 4146 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 4147 movptr(*it, Address(rsp, restore_offset)); 4148 restore_offset -= gp_reg_size; 4149 } 4150 4151 if (offset == -1) { 4152 addptr(rsp, aligned_size); 4153 } 4154 } 4155 4156 // Preserves the contents of address, destroys the contents length_in_bytes and temp. 4157 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 4158 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 4159 assert((offset_in_bytes & (BytesPerInt - 1)) == 0, "offset must be a multiple of BytesPerInt"); 4160 Label done; 4161 4162 testptr(length_in_bytes, length_in_bytes); 4163 jcc(Assembler::zero, done); 4164 4165 // Emit single 32bit store to clear leading bytes, if necessary. 
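  // (Overall approach: temp is zeroed once and reused as the store source; on
  // 64-bit an unaligned leading 4-byte slot is cleared first so that the main
  // loop can clear whole words. length_in_bytes, shifted down to a count of
  // 8-byte chunks, is then counted down while the loop stores one word (64-bit)
  // or two words (32-bit) per iteration, working from the highest slot towards
  // offset_in_bytes; on 32-bit an odd leftover word is cleared separately using
  // the carry flag produced by the shift.)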
4166 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 4167 #ifdef _LP64 4168 if (!is_aligned(offset_in_bytes, BytesPerWord)) { 4169 movl(Address(address, offset_in_bytes), temp); 4170 offset_in_bytes += BytesPerInt; 4171 decrement(length_in_bytes, BytesPerInt); 4172 } 4173 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 4174 testptr(length_in_bytes, length_in_bytes); 4175 jcc(Assembler::zero, done); 4176 #endif 4177 4178 // initialize topmost word, divide index by 2, check if odd and test if zero 4179 // note: for the remaining code to work, index must be a multiple of BytesPerWord 4180 #ifdef ASSERT 4181 { 4182 Label L; 4183 testptr(length_in_bytes, BytesPerWord - 1); 4184 jcc(Assembler::zero, L); 4185 stop("length must be a multiple of BytesPerWord"); 4186 bind(L); 4187 } 4188 #endif 4189 Register index = length_in_bytes; 4190 if (UseIncDec) { 4191 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 4192 } else { 4193 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 4194 shrptr(index, 1); 4195 } 4196 #ifndef _LP64 4197 // index could have not been a multiple of 8 (i.e., bit 2 was set) 4198 { 4199 Label even; 4200 // note: if index was a multiple of 8, then it cannot 4201 // be 0 now otherwise it must have been 0 before 4202 // => if it is even, we don't need to check for 0 again 4203 jcc(Assembler::carryClear, even); 4204 // clear topmost word (no jump would be needed if conditional assignment worked here) 4205 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp); 4206 // index could be 0 now, must check again 4207 jcc(Assembler::zero, done); 4208 bind(even); 4209 } 4210 #endif // !_LP64 4211 // initialize remaining object fields: index is a multiple of 2 now 4212 { 4213 Label loop; 4214 bind(loop); 4215 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 4216 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);) 4217 decrement(index); 4218 jcc(Assembler::notZero, loop); 4219 } 4220 4221 bind(done); 4222 } 4223 4224 // Look up the method for a megamorphic invokeinterface call. 4225 // The target method is determined by <intf_klass, itable_index>. 4226 // The receiver klass is in recv_klass. 4227 // On success, the result will be in method_result, and execution falls through. 4228 // On failure, execution transfers to the given label. 
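// The itable being scanned starts immediately after the embedded vtable and
// consists of a null-terminated sequence of itableOffsetEntry records
// ({interface klass, offset} pairs), followed by per-interface arrays of
// itableMethodEntry records. Roughly (a simplified sketch, field names
// illustrative only):
//
//   InstanceKlass layout:
//     ... vtable_length, vtable[vtable_length] ...
//     itableOffsetEntry { Klass* interface; int offset; } ioe[...];  // null interface terminates
//     itableMethodEntry { Method* method; }               ime[...];  // indexed by itable_index
//
// The loop below locates the ioe whose interface equals intf_klass and, when
// return_method is true, loads the Method* found at
// recv_klass + ioe->offset + itable_index * wordSize (plus the method field offset).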
4229 void MacroAssembler::lookup_interface_method(Register recv_klass, 4230 Register intf_klass, 4231 RegisterOrConstant itable_index, 4232 Register method_result, 4233 Register scan_temp, 4234 Label& L_no_such_interface, 4235 bool return_method) { 4236 assert_different_registers(recv_klass, intf_klass, scan_temp); 4237 assert_different_registers(method_result, intf_klass, scan_temp); 4238 assert(recv_klass != method_result || !return_method, 4239 "recv_klass can be destroyed when method isn't needed"); 4240 4241 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 4242 "caller must use same register for non-constant itable index as for method"); 4243 4244 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 4245 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4246 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4247 int scan_step = itableOffsetEntry::size() * wordSize; 4248 int vte_size = vtableEntry::size_in_bytes(); 4249 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4250 assert(vte_size == wordSize, "else adjust times_vte_scale"); 4251 4252 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4253 4254 // %%% Could store the aligned, prescaled offset in the klassoop. 4255 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 4256 4257 if (return_method) { 4258 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 4259 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4260 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 4261 } 4262 4263 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 4264 // if (scan->interface() == intf) { 4265 // result = (klass + scan->offset() + itable_index); 4266 // } 4267 // } 4268 Label search, found_method; 4269 4270 for (int peel = 1; peel >= 0; peel--) { 4271 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 4272 cmpptr(intf_klass, method_result); 4273 4274 if (peel) { 4275 jccb(Assembler::equal, found_method); 4276 } else { 4277 jccb(Assembler::notEqual, search); 4278 // (invert the test to fall through to found_method...) 4279 } 4280 4281 if (!peel) break; 4282 4283 bind(search); 4284 4285 // Check that the previous entry is non-null. A null entry means that 4286 // the receiver class doesn't implement the interface, and wasn't the 4287 // same as when the caller was compiled. 4288 testptr(method_result, method_result); 4289 jcc(Assembler::zero, L_no_such_interface); 4290 addptr(scan_temp, scan_step); 4291 } 4292 4293 bind(found_method); 4294 4295 if (return_method) { 4296 // Got a hit. 4297 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 4298 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 4299 } 4300 } 4301 4302 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 4303 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder 4304 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 4305 // The target method is determined by <holder_klass, itable_index>. 4306 // The receiver klass is in recv_klass. 4307 // On success, the result will be in method_result, and execution falls through. 4308 // On failure, execution transfers to the given label. 
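// A rough sketch of the code generated below (simplified; the real code
// overlaps the two searches and records the holder offset with a cmov as soon
// as holder_klass is passed):
//
//   if (holder_klass == resolved_klass) {
//     scan itable for holder_klass;                        // only one klass to find
//   } else {
//     scan itable for resolved_klass (the subtype check),
//          remembering holder_klass's offset if it is seen on the way;
//     if (holder offset still unknown) keep scanning for holder_klass;
//   }
//   // a null interface entry in any scan branches to L_no_such_interface
//   method_result = *(recv_klass + holder_offset + itable_index * wordSize + method_offset);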
4309 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 4310 Register holder_klass, 4311 Register resolved_klass, 4312 Register method_result, 4313 Register scan_temp, 4314 Register temp_reg2, 4315 Register receiver, 4316 int itable_index, 4317 Label& L_no_such_interface) { 4318 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 4319 Register temp_itbl_klass = method_result; 4320 Register temp_reg = (temp_reg2 == noreg ? recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 4321 4322 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4323 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4324 int scan_step = itableOffsetEntry::size() * wordSize; 4325 int vte_size = vtableEntry::size_in_bytes(); 4326 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 4327 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 4328 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4329 assert(vte_size == wordSize, "adjust times_vte_scale"); 4330 4331 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 4332 4333 // temp_itbl_klass = recv_klass.itable[0] 4334 // scan_temp = &recv_klass.itable[0] + step 4335 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4336 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 4337 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 4338 xorptr(temp_reg, temp_reg); 4339 4340 // Initial checks: 4341 // - if (holder_klass != resolved_klass), go to "scan for resolved" 4342 // - if (itable[0] == 0), no such interface 4343 // - if (itable[0] == holder_klass), shortcut to "holder found" 4344 cmpptr(holder_klass, resolved_klass); 4345 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 4346 testptr(temp_itbl_klass, temp_itbl_klass); 4347 jccb(Assembler::zero, L_no_such_interface); 4348 cmpptr(holder_klass, temp_itbl_klass); 4349 jccb(Assembler::equal, L_holder_found); 4350 4351 // Loop: Look for holder_klass record in itable 4352 // do { 4353 // tmp = itable[index]; 4354 // index += step; 4355 // if (tmp == holder_klass) { 4356 // goto L_holder_found; // Found! 4357 // } 4358 // } while (tmp != 0); 4359 // goto L_no_such_interface // Not found. 4360 Label L_scan_holder; 4361 bind(L_scan_holder); 4362 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4363 addptr(scan_temp, scan_step); 4364 cmpptr(holder_klass, temp_itbl_klass); 4365 jccb(Assembler::equal, L_holder_found); 4366 testptr(temp_itbl_klass, temp_itbl_klass); 4367 jccb(Assembler::notZero, L_scan_holder); 4368 4369 jmpb(L_no_such_interface); 4370 4371 // Loop: Look for resolved_class record in itable 4372 // do { 4373 // tmp = itable[index]; 4374 // index += step; 4375 // if (tmp == holder_klass) { 4376 // // Also check if we have met a holder klass 4377 // holder_tmp = itable[index-step-ioffset]; 4378 // } 4379 // if (tmp == resolved_klass) { 4380 // goto L_resolved_found; // Found! 4381 // } 4382 // } while (tmp != 0); 4383 // goto L_no_such_interface // Not found. 
4384 // 4385 Label L_loop_scan_resolved; 4386 bind(L_loop_scan_resolved); 4387 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4388 addptr(scan_temp, scan_step); 4389 bind(L_loop_scan_resolved_entry); 4390 cmpptr(holder_klass, temp_itbl_klass); 4391 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4392 cmpptr(resolved_klass, temp_itbl_klass); 4393 jccb(Assembler::equal, L_resolved_found); 4394 testptr(temp_itbl_klass, temp_itbl_klass); 4395 jccb(Assembler::notZero, L_loop_scan_resolved); 4396 4397 jmpb(L_no_such_interface); 4398 4399 Label L_ready; 4400 4401 // See if we already have a holder klass. If not, go and scan for it. 4402 bind(L_resolved_found); 4403 testptr(temp_reg, temp_reg); 4404 jccb(Assembler::zero, L_scan_holder); 4405 jmpb(L_ready); 4406 4407 bind(L_holder_found); 4408 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4409 4410 // Finally, temp_reg contains holder_klass vtable offset 4411 bind(L_ready); 4412 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4413 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 4414 load_klass(scan_temp, receiver, noreg); 4415 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4416 } else { 4417 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4418 } 4419 } 4420 4421 4422 // virtual method calling 4423 void MacroAssembler::lookup_virtual_method(Register recv_klass, 4424 RegisterOrConstant vtable_index, 4425 Register method_result) { 4426 const ByteSize base = Klass::vtable_start_offset(); 4427 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 4428 Address vtable_entry_addr(recv_klass, 4429 vtable_index, Address::times_ptr, 4430 base + vtableEntry::method_offset()); 4431 movptr(method_result, vtable_entry_addr); 4432 } 4433 4434 4435 void MacroAssembler::check_klass_subtype(Register sub_klass, 4436 Register super_klass, 4437 Register temp_reg, 4438 Label& L_success) { 4439 Label L_failure; 4440 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 4441 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 4442 bind(L_failure); 4443 } 4444 4445 4446 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 4447 Register super_klass, 4448 Register temp_reg, 4449 Label* L_success, 4450 Label* L_failure, 4451 Label* L_slow_path, 4452 RegisterOrConstant super_check_offset) { 4453 assert_different_registers(sub_klass, super_klass, temp_reg); 4454 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 4455 if (super_check_offset.is_register()) { 4456 assert_different_registers(sub_klass, super_klass, 4457 super_check_offset.as_register()); 4458 } else if (must_load_sco) { 4459 assert(temp_reg != noreg, "supply either a temp or a register offset"); 4460 } 4461 4462 Label L_fallthrough; 4463 int label_nulls = 0; 4464 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4465 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4466 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 4467 assert(label_nulls <= 1, "at most one null in the batch"); 4468 4469 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4470 int sco_offset = 
in_bytes(Klass::super_check_offset_offset()); 4471 Address super_check_offset_addr(super_klass, sco_offset); 4472 4473 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 4474 // range of a jccb. If this routine grows larger, reconsider at 4475 // least some of these. 4476 #define local_jcc(assembler_cond, label) \ 4477 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 4478 else jcc( assembler_cond, label) /*omit semi*/ 4479 4480 // Hacked jmp, which may only be used just before L_fallthrough. 4481 #define final_jmp(label) \ 4482 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 4483 else jmp(label) /*omit semi*/ 4484 4485 // If the pointers are equal, we are done (e.g., String[] elements). 4486 // This self-check enables sharing of secondary supertype arrays among 4487 // non-primary types such as array-of-interface. Otherwise, each such 4488 // type would need its own customized SSA. 4489 // We move this check to the front of the fast path because many 4490 // type checks are in fact trivially successful in this manner, 4491 // so we get a nicely predicted branch right at the start of the check. 4492 cmpptr(sub_klass, super_klass); 4493 local_jcc(Assembler::equal, *L_success); 4494 4495 // Check the supertype display: 4496 if (must_load_sco) { 4497 // Positive movl does right thing on LP64. 4498 movl(temp_reg, super_check_offset_addr); 4499 super_check_offset = RegisterOrConstant(temp_reg); 4500 } 4501 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 4502 cmpptr(super_klass, super_check_addr); // load displayed supertype 4503 4504 // This check has worked decisively for primary supers. 4505 // Secondary supers are sought in the super_cache ('super_cache_addr'). 4506 // (Secondary supers are interfaces and very deeply nested subtypes.) 4507 // This works in the same check above because of a tricky aliasing 4508 // between the super_cache and the primary super display elements. 4509 // (The 'super_check_addr' can address either, as the case requires.) 4510 // Note that the cache is updated below if it does not help us find 4511 // what we need immediately. 4512 // So if it was a primary super, we can just fail immediately. 4513 // Otherwise, it's the slow path for us (no success at this point). 4514 4515 if (super_check_offset.is_register()) { 4516 local_jcc(Assembler::equal, *L_success); 4517 cmpl(super_check_offset.as_register(), sc_offset); 4518 if (L_failure == &L_fallthrough) { 4519 local_jcc(Assembler::equal, *L_slow_path); 4520 } else { 4521 local_jcc(Assembler::notEqual, *L_failure); 4522 final_jmp(*L_slow_path); 4523 } 4524 } else if (super_check_offset.as_constant() == sc_offset) { 4525 // Need a slow path; fast failure is impossible. 4526 if (L_slow_path == &L_fallthrough) { 4527 local_jcc(Assembler::equal, *L_success); 4528 } else { 4529 local_jcc(Assembler::notEqual, *L_slow_path); 4530 final_jmp(*L_success); 4531 } 4532 } else { 4533 // No slow path; it's a fast decision. 
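    // (Here super_check_offset is a compile-time constant that is not the
    // secondary-super cache slot, so it addresses the primary supers display;
    // a miss in the display is final, hence the compare above has already
    // decided between success and failure.)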
4534 if (L_failure == &L_fallthrough) { 4535 local_jcc(Assembler::equal, *L_success); 4536 } else { 4537 local_jcc(Assembler::notEqual, *L_failure); 4538 final_jmp(*L_success); 4539 } 4540 } 4541 4542 bind(L_fallthrough); 4543 4544 #undef local_jcc 4545 #undef final_jmp 4546 } 4547 4548 4549 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 4550 Register super_klass, 4551 Register temp_reg, 4552 Register temp2_reg, 4553 Label* L_success, 4554 Label* L_failure, 4555 bool set_cond_codes) { 4556 assert_different_registers(sub_klass, super_klass, temp_reg); 4557 if (temp2_reg != noreg) 4558 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4559 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4560 4561 Label L_fallthrough; 4562 int label_nulls = 0; 4563 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4564 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4565 assert(label_nulls <= 1, "at most one null in the batch"); 4566 4567 // a couple of useful fields in sub_klass: 4568 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4569 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4570 Address secondary_supers_addr(sub_klass, ss_offset); 4571 Address super_cache_addr( sub_klass, sc_offset); 4572 4573 // Do a linear scan of the secondary super-klass chain. 4574 // This code is rarely used, so simplicity is a virtue here. 4575 // The repne_scan instruction uses fixed registers, which we must spill. 4576 // Don't worry too much about pre-existing connections with the input regs. 4577 4578 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 4579 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 4580 4581 // Get super_klass value into rax (even if it was in rdi or rcx). 4582 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 4583 if (super_klass != rax) { 4584 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 4585 mov(rax, super_klass); 4586 } 4587 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 4588 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 4589 4590 #ifndef PRODUCT 4591 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 4592 ExternalAddress pst_counter_addr((address) pst_counter); 4593 NOT_LP64( incrementl(pst_counter_addr) ); 4594 LP64_ONLY( lea(rcx, pst_counter_addr) ); 4595 LP64_ONLY( incrementl(Address(rcx, 0)) ); 4596 #endif //PRODUCT 4597 4598 // We will consult the secondary-super array. 4599 movptr(rdi, secondary_supers_addr); 4600 // Load the array length. (Positive movl does right thing on LP64.) 4601 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 4602 // Skip to start of data. 4603 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 4604 4605 // Scan RCX words at [RDI] for an occurrence of RAX. 4606 // Set NZ/Z based on last compare. 4607 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 4608 // not change flags (only scas instruction which is repeated sets flags). 4609 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 4610 4611 testptr(rax,rax); // Set Z = 0 4612 repne_scan(); 4613 4614 // Unspill the temp. registers: 4615 if (pushed_rdi) pop(rdi); 4616 if (pushed_rcx) pop(rcx); 4617 if (pushed_rax) pop(rax); 4618 4619 if (set_cond_codes) { 4620 // Special hack for the AD files: rdi is guaranteed non-zero. 
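    // (repne_scan only advances rdi through the secondary-supers array, so it
    // still points at or just past an array element and cannot be zero here.)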
4621 assert(!pushed_rdi, "rdi must be left non-null"); 4622 // Also, the condition codes are properly set Z/NZ on succeed/failure. 4623 } 4624 4625 if (L_failure == &L_fallthrough) 4626 jccb(Assembler::notEqual, *L_failure); 4627 else jcc(Assembler::notEqual, *L_failure); 4628 4629 // Success. Cache the super we found and proceed in triumph. 4630 movptr(super_cache_addr, super_klass); 4631 4632 if (L_success != &L_fallthrough) { 4633 jmp(*L_success); 4634 } 4635 4636 #undef IS_A_TEMP 4637 4638 bind(L_fallthrough); 4639 } 4640 4641 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 4642 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 4643 4644 Label L_fallthrough; 4645 if (L_fast_path == nullptr) { 4646 L_fast_path = &L_fallthrough; 4647 } else if (L_slow_path == nullptr) { 4648 L_slow_path = &L_fallthrough; 4649 } 4650 4651 // Fast path check: class is fully initialized 4652 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 4653 jcc(Assembler::equal, *L_fast_path); 4654 4655 // Fast path check: current thread is initializer thread 4656 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 4657 if (L_slow_path == &L_fallthrough) { 4658 jcc(Assembler::equal, *L_fast_path); 4659 bind(*L_slow_path); 4660 } else if (L_fast_path == &L_fallthrough) { 4661 jcc(Assembler::notEqual, *L_slow_path); 4662 bind(*L_fast_path); 4663 } else { 4664 Unimplemented(); 4665 } 4666 } 4667 4668 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 4669 if (VM_Version::supports_cmov()) { 4670 cmovl(cc, dst, src); 4671 } else { 4672 Label L; 4673 jccb(negate_condition(cc), L); 4674 movl(dst, src); 4675 bind(L); 4676 } 4677 } 4678 4679 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 4680 if (VM_Version::supports_cmov()) { 4681 cmovl(cc, dst, src); 4682 } else { 4683 Label L; 4684 jccb(negate_condition(cc), L); 4685 movl(dst, src); 4686 bind(L); 4687 } 4688 } 4689 4690 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 4691 if (!VerifyOops) return; 4692 4693 BLOCK_COMMENT("verify_oop {"); 4694 #ifdef _LP64 4695 push(rscratch1); 4696 #endif 4697 push(rax); // save rax 4698 push(reg); // pass register argument 4699 4700 // Pass register number to verify_oop_subroutine 4701 const char* b = nullptr; 4702 { 4703 ResourceMark rm; 4704 stringStream ss; 4705 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 4706 b = code_string(ss.as_string()); 4707 } 4708 ExternalAddress buffer((address) b); 4709 pushptr(buffer.addr(), rscratch1); 4710 4711 // call indirectly to solve generation ordering problem 4712 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4713 call(rax); 4714 // Caller pops the arguments (oop, message) and restores rax, r10 4715 BLOCK_COMMENT("} verify_oop"); 4716 } 4717 4718 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 4719 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 4720 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 4721 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 4722 vpternlogd(dst, 0xFF, dst, dst, vector_len); 4723 } else if (VM_Version::supports_avx()) { 4724 vpcmpeqd(dst, dst, dst, vector_len); 4725 } else { 4726 assert(VM_Version::supports_sse2(), ""); 4727 pcmpeqd(dst, dst); 
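    // Whichever branch was taken, dst now holds all-ones in every lane; callers
    // use this as a mask, e.g. for the XOR-based NOT in vpcmpCCW() above.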
4728 } 4729 } 4730 4731 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 4732 int extra_slot_offset) { 4733 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 4734 int stackElementSize = Interpreter::stackElementSize; 4735 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 4736 #ifdef ASSERT 4737 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 4738 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 4739 #endif 4740 Register scale_reg = noreg; 4741 Address::ScaleFactor scale_factor = Address::no_scale; 4742 if (arg_slot.is_constant()) { 4743 offset += arg_slot.as_constant() * stackElementSize; 4744 } else { 4745 scale_reg = arg_slot.as_register(); 4746 scale_factor = Address::times(stackElementSize); 4747 } 4748 offset += wordSize; // return PC is on stack 4749 return Address(rsp, scale_reg, scale_factor, offset); 4750 } 4751 4752 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 4753 if (!VerifyOops) return; 4754 4755 #ifdef _LP64 4756 push(rscratch1); 4757 #endif 4758 push(rax); // save rax, 4759 // addr may contain rsp so we will have to adjust it based on the push 4760 // we just did (and on 64 bit we do two pushes) 4761 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 4762 // stores rax into addr which is backwards of what was intended. 4763 if (addr.uses(rsp)) { 4764 lea(rax, addr); 4765 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 4766 } else { 4767 pushptr(addr); 4768 } 4769 4770 // Pass register number to verify_oop_subroutine 4771 const char* b = nullptr; 4772 { 4773 ResourceMark rm; 4774 stringStream ss; 4775 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 4776 b = code_string(ss.as_string()); 4777 } 4778 ExternalAddress buffer((address) b); 4779 pushptr(buffer.addr(), rscratch1); 4780 4781 // call indirectly to solve generation ordering problem 4782 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 4783 call(rax); 4784 // Caller pops the arguments (addr, message) and restores rax, r10. 
4785 } 4786 4787 void MacroAssembler::verify_tlab() { 4788 #ifdef ASSERT 4789 if (UseTLAB && VerifyOops) { 4790 Label next, ok; 4791 Register t1 = rsi; 4792 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 4793 4794 push(t1); 4795 NOT_LP64(push(thread_reg)); 4796 NOT_LP64(get_thread(thread_reg)); 4797 4798 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 4799 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 4800 jcc(Assembler::aboveEqual, next); 4801 STOP("assert(top >= start)"); 4802 should_not_reach_here(); 4803 4804 bind(next); 4805 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 4806 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 4807 jcc(Assembler::aboveEqual, ok); 4808 STOP("assert(top <= end)"); 4809 should_not_reach_here(); 4810 4811 bind(ok); 4812 NOT_LP64(pop(thread_reg)); 4813 pop(t1); 4814 } 4815 #endif 4816 } 4817 4818 class ControlWord { 4819 public: 4820 int32_t _value; 4821 4822 int rounding_control() const { return (_value >> 10) & 3 ; } 4823 int precision_control() const { return (_value >> 8) & 3 ; } 4824 bool precision() const { return ((_value >> 5) & 1) != 0; } 4825 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4826 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4827 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4828 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4829 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4830 4831 void print() const { 4832 // rounding control 4833 const char* rc; 4834 switch (rounding_control()) { 4835 case 0: rc = "round near"; break; 4836 case 1: rc = "round down"; break; 4837 case 2: rc = "round up "; break; 4838 case 3: rc = "chop "; break; 4839 default: 4840 rc = nullptr; // silence compiler warnings 4841 fatal("Unknown rounding control: %d", rounding_control()); 4842 }; 4843 // precision control 4844 const char* pc; 4845 switch (precision_control()) { 4846 case 0: pc = "24 bits "; break; 4847 case 1: pc = "reserved"; break; 4848 case 2: pc = "53 bits "; break; 4849 case 3: pc = "64 bits "; break; 4850 default: 4851 pc = nullptr; // silence compiler warnings 4852 fatal("Unknown precision control: %d", precision_control()); 4853 }; 4854 // flags 4855 char f[9]; 4856 f[0] = ' '; 4857 f[1] = ' '; 4858 f[2] = (precision ()) ? 'P' : 'p'; 4859 f[3] = (underflow ()) ? 'U' : 'u'; 4860 f[4] = (overflow ()) ? 'O' : 'o'; 4861 f[5] = (zero_divide ()) ? 'Z' : 'z'; 4862 f[6] = (denormalized()) ? 'D' : 'd'; 4863 f[7] = (invalid ()) ? 
'I' : 'i'; 4864 f[8] = '\x0'; 4865 // output 4866 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 4867 } 4868 4869 }; 4870 4871 class StatusWord { 4872 public: 4873 int32_t _value; 4874 4875 bool busy() const { return ((_value >> 15) & 1) != 0; } 4876 bool C3() const { return ((_value >> 14) & 1) != 0; } 4877 bool C2() const { return ((_value >> 10) & 1) != 0; } 4878 bool C1() const { return ((_value >> 9) & 1) != 0; } 4879 bool C0() const { return ((_value >> 8) & 1) != 0; } 4880 int top() const { return (_value >> 11) & 7 ; } 4881 bool error_status() const { return ((_value >> 7) & 1) != 0; } 4882 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 4883 bool precision() const { return ((_value >> 5) & 1) != 0; } 4884 bool underflow() const { return ((_value >> 4) & 1) != 0; } 4885 bool overflow() const { return ((_value >> 3) & 1) != 0; } 4886 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 4887 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 4888 bool invalid() const { return ((_value >> 0) & 1) != 0; } 4889 4890 void print() const { 4891 // condition codes 4892 char c[5]; 4893 c[0] = (C3()) ? '3' : '-'; 4894 c[1] = (C2()) ? '2' : '-'; 4895 c[2] = (C1()) ? '1' : '-'; 4896 c[3] = (C0()) ? '0' : '-'; 4897 c[4] = '\x0'; 4898 // flags 4899 char f[9]; 4900 f[0] = (error_status()) ? 'E' : '-'; 4901 f[1] = (stack_fault ()) ? 'S' : '-'; 4902 f[2] = (precision ()) ? 'P' : '-'; 4903 f[3] = (underflow ()) ? 'U' : '-'; 4904 f[4] = (overflow ()) ? 'O' : '-'; 4905 f[5] = (zero_divide ()) ? 'Z' : '-'; 4906 f[6] = (denormalized()) ? 'D' : '-'; 4907 f[7] = (invalid ()) ? 'I' : '-'; 4908 f[8] = '\x0'; 4909 // output 4910 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 4911 } 4912 4913 }; 4914 4915 class TagWord { 4916 public: 4917 int32_t _value; 4918 4919 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 4920 4921 void print() const { 4922 printf("%04x", _value & 0xFFFF); 4923 } 4924 4925 }; 4926 4927 class FPU_Register { 4928 public: 4929 int32_t _m0; 4930 int32_t _m1; 4931 int16_t _ex; 4932 4933 bool is_indefinite() const { 4934 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 4935 } 4936 4937 void print() const { 4938 char sign = (_ex < 0) ? '-' : '+'; 4939 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 4940 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 4941 }; 4942 4943 }; 4944 4945 class FPU_State { 4946 public: 4947 enum { 4948 register_size = 10, 4949 number_of_registers = 8, 4950 register_mask = 7 4951 }; 4952 4953 ControlWord _control_word; 4954 StatusWord _status_word; 4955 TagWord _tag_word; 4956 int32_t _error_offset; 4957 int32_t _error_selector; 4958 int32_t _data_offset; 4959 int32_t _data_selector; 4960 int8_t _register[register_size * number_of_registers]; 4961 4962 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 4963 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 4964 4965 const char* tag_as_string(int tag) const { 4966 switch (tag) { 4967 case 0: return "valid"; 4968 case 1: return "zero"; 4969 case 2: return "special"; 4970 case 3: return "empty"; 4971 } 4972 ShouldNotReachHere(); 4973 return nullptr; 4974 } 4975 4976 void print() const { 4977 // print computation registers 4978 { int t = _status_word.top(); 4979 for (int i = 0; i < number_of_registers; i++) { 4980 int j = (i - t) & register_mask; 4981 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 4982 st(j)->print(); 4983 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 4984 } 4985 } 4986 printf("\n"); 4987 // print control registers 4988 printf("ctrl = "); _control_word.print(); printf("\n"); 4989 printf("stat = "); _status_word .print(); printf("\n"); 4990 printf("tags = "); _tag_word .print(); printf("\n"); 4991 } 4992 4993 }; 4994 4995 class Flag_Register { 4996 public: 4997 int32_t _value; 4998 4999 bool overflow() const { return ((_value >> 11) & 1) != 0; } 5000 bool direction() const { return ((_value >> 10) & 1) != 0; } 5001 bool sign() const { return ((_value >> 7) & 1) != 0; } 5002 bool zero() const { return ((_value >> 6) & 1) != 0; } 5003 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 5004 bool parity() const { return ((_value >> 2) & 1) != 0; } 5005 bool carry() const { return ((_value >> 0) & 1) != 0; } 5006 5007 void print() const { 5008 // flags 5009 char f[8]; 5010 f[0] = (overflow ()) ? 'O' : '-'; 5011 f[1] = (direction ()) ? 'D' : '-'; 5012 f[2] = (sign ()) ? 'S' : '-'; 5013 f[3] = (zero ()) ? 'Z' : '-'; 5014 f[4] = (auxiliary_carry()) ? 'A' : '-'; 5015 f[5] = (parity ()) ? 'P' : '-'; 5016 f[6] = (carry ()) ? 'C' : '-'; 5017 f[7] = '\x0'; 5018 // output 5019 printf("%08x flags = %s", _value, f); 5020 } 5021 5022 }; 5023 5024 class IU_Register { 5025 public: 5026 int32_t _value; 5027 5028 void print() const { 5029 printf("%08x %11d", _value, _value); 5030 } 5031 5032 }; 5033 5034 class IU_State { 5035 public: 5036 Flag_Register _eflags; 5037 IU_Register _rdi; 5038 IU_Register _rsi; 5039 IU_Register _rbp; 5040 IU_Register _rsp; 5041 IU_Register _rbx; 5042 IU_Register _rdx; 5043 IU_Register _rcx; 5044 IU_Register _rax; 5045 5046 void print() const { 5047 // computation registers 5048 printf("rax, = "); _rax.print(); printf("\n"); 5049 printf("rbx, = "); _rbx.print(); printf("\n"); 5050 printf("rcx = "); _rcx.print(); printf("\n"); 5051 printf("rdx = "); _rdx.print(); printf("\n"); 5052 printf("rdi = "); _rdi.print(); printf("\n"); 5053 printf("rsi = "); _rsi.print(); printf("\n"); 5054 printf("rbp, = "); _rbp.print(); printf("\n"); 5055 printf("rsp = "); _rsp.print(); printf("\n"); 5056 printf("\n"); 5057 // control registers 5058 printf("flgs = "); _eflags.print(); printf("\n"); 5059 } 5060 }; 5061 5062 5063 class CPU_State { 5064 public: 5065 FPU_State _fpu_state; 5066 IU_State _iu_state; 5067 5068 void print() const { 5069 printf("--------------------------------------------------\n"); 5070 _iu_state .print(); 5071 printf("\n"); 5072 _fpu_state.print(); 5073 printf("--------------------------------------------------\n"); 5074 } 5075 5076 }; 5077 5078 5079 static void _print_CPU_state(CPU_State* state) { 5080 state->print(); 5081 }; 5082 5083 5084 void MacroAssembler::print_CPU_state() { 5085 push_CPU_state(); 5086 push(rsp); // pass CPU state 5087 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 5088 addptr(rsp, wordSize); // discard argument 5089 pop_CPU_state(); 5090 } 5091 5092 5093 #ifndef _LP64 5094 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 5095 static int counter = 0; 5096 FPU_State* fs = &state->_fpu_state; 5097 counter++; 5098 // For leaf calls, only verify that the top few elements remain empty. 5099 // We only need 1 empty at the top for C2 code. 
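  // stack_depth convention: a non-negative value means "exactly stack_depth
  // values on the x87 register stack"; a negative value means "at most
  // -stack_depth values", in which case only ST7 being tagged empty (stack not
  // full) is actually verified before returning.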
5100 if( stack_depth < 0 ) { 5101 if( fs->tag_for_st(7) != 3 ) { 5102 printf("FPR7 not empty\n"); 5103 state->print(); 5104 assert(false, "error"); 5105 return false; 5106 } 5107 return true; // All other stack states do not matter 5108 } 5109 5110 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 5111 "bad FPU control word"); 5112 5113 // compute stack depth 5114 int i = 0; 5115 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 5116 int d = i; 5117 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 5118 // verify findings 5119 if (i != FPU_State::number_of_registers) { 5120 // stack not contiguous 5121 printf("%s: stack not contiguous at ST%d\n", s, i); 5122 state->print(); 5123 assert(false, "error"); 5124 return false; 5125 } 5126 // check if computed stack depth corresponds to expected stack depth 5127 if (stack_depth < 0) { 5128 // expected stack depth is -stack_depth or less 5129 if (d > -stack_depth) { 5130 // too many elements on the stack 5131 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 5132 state->print(); 5133 assert(false, "error"); 5134 return false; 5135 } 5136 } else { 5137 // expected stack depth is stack_depth 5138 if (d != stack_depth) { 5139 // wrong stack depth 5140 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 5141 state->print(); 5142 assert(false, "error"); 5143 return false; 5144 } 5145 } 5146 // everything is cool 5147 return true; 5148 } 5149 5150 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 5151 if (!VerifyFPU) return; 5152 push_CPU_state(); 5153 push(rsp); // pass CPU state 5154 ExternalAddress msg((address) s); 5155 // pass message string s 5156 pushptr(msg.addr(), noreg); 5157 push(stack_depth); // pass stack depth 5158 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 5159 addptr(rsp, 3 * wordSize); // discard arguments 5160 // check for error 5161 { Label L; 5162 testl(rax, rax); 5163 jcc(Assembler::notZero, L); 5164 int3(); // break if error condition 5165 bind(L); 5166 } 5167 pop_CPU_state(); 5168 } 5169 #endif // _LP64 5170 5171 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 5172 // Either restore the MXCSR register after returning from the JNI Call 5173 // or verify that it wasn't changed (with -Xcheck:jni flag). 5174 if (VM_Version::supports_sse()) { 5175 if (RestoreMXCSROnJNICalls) { 5176 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 5177 } else if (CheckJNICalls) { 5178 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 5179 } 5180 } 5181 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 5182 vzeroupper(); 5183 5184 #ifndef _LP64 5185 // Either restore the x87 floating pointer control word after returning 5186 // from the JNI call or verify that it wasn't changed. 5187 if (CheckJNICalls) { 5188 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 5189 } 5190 #endif // _LP64 5191 } 5192 5193 // ((OopHandle)result).resolve(); 5194 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 5195 assert_different_registers(result, tmp); 5196 5197 // Only 64 bit platforms support GCs that require a tmp register 5198 // Only IN_HEAP loads require a thread_tmp register 5199 // OopHandle::resolve is an indirection like jobject. 
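  // Rough sketch of the load below, ignoring GC barriers:
  //   result = *(oop*)result;   // dereference the OopHandle slot
  // Going through access_load_at with IN_NATIVE lets the active
  // BarrierSetAssembler add whatever read barrier the current GC needs.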
5200 access_load_at(T_OBJECT, IN_NATIVE, 5201 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 5202 } 5203 5204 // ((WeakHandle)result).resolve(); 5205 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 5206 assert_different_registers(rresult, rtmp); 5207 Label resolved; 5208 5209 // A null weak handle resolves to null. 5210 cmpptr(rresult, 0); 5211 jcc(Assembler::equal, resolved); 5212 5213 // Only 64 bit platforms support GCs that require a tmp register 5214 // Only IN_HEAP loads require a thread_tmp register 5215 // WeakHandle::resolve is an indirection like jweak. 5216 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 5217 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 5218 bind(resolved); 5219 } 5220 5221 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 5222 // get mirror 5223 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 5224 load_method_holder(mirror, method); 5225 movptr(mirror, Address(mirror, mirror_offset)); 5226 resolve_oop_handle(mirror, tmp); 5227 } 5228 5229 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5230 load_method_holder(rresult, rmethod); 5231 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5232 } 5233 5234 void MacroAssembler::load_method_holder(Register holder, Register method) { 5235 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 5236 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5237 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5238 } 5239 5240 #ifdef _LP64 5241 void MacroAssembler::load_nklass(Register dst, Register src) { 5242 assert(UseCompressedClassPointers, "expect compressed class pointers"); 5243 5244 if (!UseCompactObjectHeaders) { 5245 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5246 return; 5247 } 5248 5249 Label fast; 5250 movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5251 testb(dst, markWord::monitor_value); 5252 jccb(Assembler::zero, fast); 5253 5254 // Fetch displaced header 5255 movq(dst, Address(dst, OM_OFFSET_NO_MONITOR_VALUE_TAG(header))); 5256 5257 bind(fast); 5258 shrq(dst, markWord::klass_shift); 5259 } 5260 #endif 5261 5262 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 5263 assert_different_registers(src, tmp); 5264 assert_different_registers(dst, tmp); 5265 #ifdef _LP64 5266 if (UseCompressedClassPointers) { 5267 load_nklass(dst, src); 5268 decode_klass_not_null(dst, tmp); 5269 } else 5270 #endif 5271 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5272 } 5273 5274 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 5275 assert(!UseCompactObjectHeaders, "not with compact headers"); 5276 assert_different_registers(src, tmp); 5277 assert_different_registers(dst, tmp); 5278 #ifdef _LP64 5279 if (UseCompressedClassPointers) { 5280 encode_klass_not_null(src, tmp); 5281 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5282 } else 5283 #endif 5284 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 5285 } 5286 5287 void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { 5288 #ifdef _LP64 5289 if (UseCompactObjectHeaders) { 5290 // NOTE: We need to deal with possible ObjectMonitor in object header. 5291 // Eventually we might be able to do simple movl & cmpl like in 5292 // the CCP path below. 
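    // With compact headers the narrow Klass* is held in the upper bits of the
    // mark word (possibly displaced into an ObjectMonitor), so load_nklass
    // (defined earlier) extracts it into tmp before the 32-bit compare below.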
5293 load_nklass(tmp, obj); 5294 cmpl(klass, tmp); 5295 } else if (UseCompressedClassPointers) { 5296 cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5297 } else 5298 #endif 5299 { 5300 cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); 5301 } 5302 } 5303 5304 void MacroAssembler::cmp_klass(Register src, Register dst, Register tmp1, Register tmp2) { 5305 #ifdef _LP64 5306 if (UseCompactObjectHeaders) { 5307 // NOTE: We need to deal with possible ObjectMonitor in object header. 5308 // Eventually we might be able to do simple movl & cmpl like in 5309 // the CCP path below. 5310 assert(tmp2 != noreg, "need tmp2"); 5311 assert_different_registers(src, dst, tmp1, tmp2); 5312 load_nklass(tmp1, src); 5313 load_nklass(tmp2, dst); 5314 cmpl(tmp1, tmp2); 5315 } else if (UseCompressedClassPointers) { 5316 movl(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); 5317 cmpl(tmp1, Address(dst, oopDesc::klass_offset_in_bytes())); 5318 } else 5319 #endif 5320 { 5321 movptr(tmp1, Address(src, oopDesc::klass_offset_in_bytes())); 5322 cmpptr(tmp1, Address(dst, oopDesc::klass_offset_in_bytes())); 5323 } 5324 } 5325 5326 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 5327 Register tmp1, Register thread_tmp) { 5328 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5329 decorators = AccessInternal::decorator_fixup(decorators, type); 5330 bool as_raw = (decorators & AS_RAW) != 0; 5331 if (as_raw) { 5332 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5333 } else { 5334 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 5335 } 5336 } 5337 5338 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 5339 Register tmp1, Register tmp2, Register tmp3) { 5340 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5341 decorators = AccessInternal::decorator_fixup(decorators, type); 5342 bool as_raw = (decorators & AS_RAW) != 0; 5343 if (as_raw) { 5344 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5345 } else { 5346 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5347 } 5348 } 5349 5350 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5351 Register thread_tmp, DecoratorSet decorators) { 5352 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 5353 } 5354 5355 // Doesn't do verification, generates fixed size code 5356 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5357 Register thread_tmp, DecoratorSet decorators) { 5358 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 5359 } 5360 5361 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5362 Register tmp2, Register tmp3, DecoratorSet decorators) { 5363 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5364 } 5365 5366 // Used for storing nulls. 
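// Passing noreg as the value register tells the barrier assembler that the
// value being stored is null; the store still goes through access_store_at
// so any required GC barriers are applied.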
5367 void MacroAssembler::store_heap_oop_null(Address dst) { 5368 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5369 } 5370 5371 #ifdef _LP64 5372 void MacroAssembler::store_klass_gap(Register dst, Register src) { 5373 if (UseCompressedClassPointers) { 5374 // Store to klass gap in destination 5375 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 5376 } 5377 } 5378 5379 #ifdef ASSERT 5380 void MacroAssembler::verify_heapbase(const char* msg) { 5381 assert (UseCompressedOops, "should be compressed"); 5382 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5383 if (CheckCompressedOops) { 5384 Label ok; 5385 ExternalAddress src2(CompressedOops::ptrs_base_addr()); 5386 const bool is_src2_reachable = reachable(src2); 5387 if (!is_src2_reachable) { 5388 push(rscratch1); // cmpptr trashes rscratch1 5389 } 5390 cmpptr(r12_heapbase, src2, rscratch1); 5391 jcc(Assembler::equal, ok); 5392 STOP(msg); 5393 bind(ok); 5394 if (!is_src2_reachable) { 5395 pop(rscratch1); 5396 } 5397 } 5398 } 5399 #endif 5400 5401 // Algorithm must match oop.inline.hpp encode_heap_oop. 5402 void MacroAssembler::encode_heap_oop(Register r) { 5403 #ifdef ASSERT 5404 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5405 #endif 5406 verify_oop_msg(r, "broken oop in encode_heap_oop"); 5407 if (CompressedOops::base() == nullptr) { 5408 if (CompressedOops::shift() != 0) { 5409 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5410 shrq(r, LogMinObjAlignmentInBytes); 5411 } 5412 return; 5413 } 5414 testq(r, r); 5415 cmovq(Assembler::equal, r, r12_heapbase); 5416 subq(r, r12_heapbase); 5417 shrq(r, LogMinObjAlignmentInBytes); 5418 } 5419 5420 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5421 #ifdef ASSERT 5422 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5423 if (CheckCompressedOops) { 5424 Label ok; 5425 testq(r, r); 5426 jcc(Assembler::notEqual, ok); 5427 STOP("null oop passed to encode_heap_oop_not_null"); 5428 bind(ok); 5429 } 5430 #endif 5431 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5432 if (CompressedOops::base() != nullptr) { 5433 subq(r, r12_heapbase); 5434 } 5435 if (CompressedOops::shift() != 0) { 5436 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5437 shrq(r, LogMinObjAlignmentInBytes); 5438 } 5439 } 5440 5441 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5442 #ifdef ASSERT 5443 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5444 if (CheckCompressedOops) { 5445 Label ok; 5446 testq(src, src); 5447 jcc(Assembler::notEqual, ok); 5448 STOP("null oop passed to encode_heap_oop_not_null2"); 5449 bind(ok); 5450 } 5451 #endif 5452 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5453 if (dst != src) { 5454 movq(dst, src); 5455 } 5456 if (CompressedOops::base() != nullptr) { 5457 subq(dst, r12_heapbase); 5458 } 5459 if (CompressedOops::shift() != 0) { 5460 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5461 shrq(dst, LogMinObjAlignmentInBytes); 5462 } 5463 } 5464 5465 void MacroAssembler::decode_heap_oop(Register r) { 5466 #ifdef ASSERT 5467 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5468 #endif 5469 if (CompressedOops::base() == nullptr) { 5470 if (CompressedOops::shift() != 0) { 5471 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg 
wrong"); 5472 shlq(r, LogMinObjAlignmentInBytes); 5473 } 5474 } else { 5475 Label done; 5476 shlq(r, LogMinObjAlignmentInBytes); 5477 jccb(Assembler::equal, done); 5478 addq(r, r12_heapbase); 5479 bind(done); 5480 } 5481 verify_oop_msg(r, "broken oop in decode_heap_oop"); 5482 } 5483 5484 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5485 // Note: it will change flags 5486 assert (UseCompressedOops, "should only be used for compressed headers"); 5487 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5488 // Cannot assert, unverified entry point counts instructions (see .ad file) 5489 // vtableStubs also counts instructions in pd_code_size_limit. 5490 // Also do not verify_oop as this is called by verify_oop. 5491 if (CompressedOops::shift() != 0) { 5492 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5493 shlq(r, LogMinObjAlignmentInBytes); 5494 if (CompressedOops::base() != nullptr) { 5495 addq(r, r12_heapbase); 5496 } 5497 } else { 5498 assert (CompressedOops::base() == nullptr, "sanity"); 5499 } 5500 } 5501 5502 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5503 // Note: it will change flags 5504 assert (UseCompressedOops, "should only be used for compressed headers"); 5505 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5506 // Cannot assert, unverified entry point counts instructions (see .ad file) 5507 // vtableStubs also counts instructions in pd_code_size_limit. 5508 // Also do not verify_oop as this is called by verify_oop. 5509 if (CompressedOops::shift() != 0) { 5510 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5511 if (LogMinObjAlignmentInBytes == Address::times_8) { 5512 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 5513 } else { 5514 if (dst != src) { 5515 movq(dst, src); 5516 } 5517 shlq(dst, LogMinObjAlignmentInBytes); 5518 if (CompressedOops::base() != nullptr) { 5519 addq(dst, r12_heapbase); 5520 } 5521 } 5522 } else { 5523 assert (CompressedOops::base() == nullptr, "sanity"); 5524 if (dst != src) { 5525 movq(dst, src); 5526 } 5527 } 5528 } 5529 5530 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 5531 assert_different_registers(r, tmp); 5532 if (CompressedKlassPointers::base() != nullptr) { 5533 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5534 subq(r, tmp); 5535 } 5536 if (CompressedKlassPointers::shift() != 0) { 5537 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5538 shrq(r, LogKlassAlignmentInBytes); 5539 } 5540 } 5541 5542 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 5543 assert_different_registers(src, dst); 5544 if (CompressedKlassPointers::base() != nullptr) { 5545 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 5546 addq(dst, src); 5547 } else { 5548 movptr(dst, src); 5549 } 5550 if (CompressedKlassPointers::shift() != 0) { 5551 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5552 shrq(dst, LogKlassAlignmentInBytes); 5553 } 5554 } 5555 5556 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 5557 assert_different_registers(r, tmp); 5558 // Note: it will change flags 5559 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 5560 // Cannot assert, unverified entry point counts instructions (see .ad file) 5561 // vtableStubs also counts instructions in pd_code_size_limit. 
5562 // Also do not verify_oop as this is called by verify_oop. 5563 if (CompressedKlassPointers::shift() != 0) { 5564 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5565 shlq(r, LogKlassAlignmentInBytes); 5566 } 5567 if (CompressedKlassPointers::base() != nullptr) { 5568 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 5569 addq(r, tmp); 5570 } 5571 } 5572 5573 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 5574 assert_different_registers(src, dst); 5575 // Note: it will change flags 5576 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5577 // Cannot assert, unverified entry point counts instructions (see .ad file) 5578 // vtableStubs also counts instructions in pd_code_size_limit. 5579 // Also do not verify_oop as this is called by verify_oop. 5580 5581 if (CompressedKlassPointers::base() == nullptr && 5582 CompressedKlassPointers::shift() == 0) { 5583 // The best case scenario is that there is no base or shift. Then it is already 5584 // a pointer that needs nothing but a register rename. 5585 movl(dst, src); 5586 } else { 5587 if (CompressedKlassPointers::base() != nullptr) { 5588 mov64(dst, (int64_t)CompressedKlassPointers::base()); 5589 } else { 5590 xorq(dst, dst); 5591 } 5592 if (CompressedKlassPointers::shift() != 0) { 5593 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 5594 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 5595 leaq(dst, Address(dst, src, Address::times_8, 0)); 5596 } else { 5597 addq(dst, src); 5598 } 5599 } 5600 } 5601 5602 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5603 assert (UseCompressedOops, "should only be used for compressed headers"); 5604 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5605 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5606 int oop_index = oop_recorder()->find_index(obj); 5607 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5608 mov_narrow_oop(dst, oop_index, rspec); 5609 } 5610 5611 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 5612 assert (UseCompressedOops, "should only be used for compressed headers"); 5613 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5614 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5615 int oop_index = oop_recorder()->find_index(obj); 5616 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5617 mov_narrow_oop(dst, oop_index, rspec); 5618 } 5619 5620 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5621 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5622 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5623 int klass_index = oop_recorder()->find_index(k); 5624 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5625 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5626 } 5627 5628 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 5629 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5630 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5631 int klass_index = oop_recorder()->find_index(k); 5632 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5633 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5634 } 5635 5636 void 
MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 5637 assert (UseCompressedOops, "should only be used for compressed headers"); 5638 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5639 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5640 int oop_index = oop_recorder()->find_index(obj); 5641 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5642 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5643 } 5644 5645 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 5646 assert (UseCompressedOops, "should only be used for compressed headers"); 5647 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5648 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5649 int oop_index = oop_recorder()->find_index(obj); 5650 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5651 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 5652 } 5653 5654 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 5655 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5656 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5657 int klass_index = oop_recorder()->find_index(k); 5658 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5659 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5660 } 5661 5662 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 5663 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5664 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5665 int klass_index = oop_recorder()->find_index(k); 5666 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 5667 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 5668 } 5669 5670 void MacroAssembler::reinit_heapbase() { 5671 if (UseCompressedOops) { 5672 if (Universe::heap() != nullptr) { 5673 if (CompressedOops::base() == nullptr) { 5674 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 5675 } else { 5676 mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base()); 5677 } 5678 } else { 5679 movptr(r12_heapbase, ExternalAddress(CompressedOops::ptrs_base_addr())); 5680 } 5681 } 5682 } 5683 5684 #endif // _LP64 5685 5686 #if COMPILER2_OR_JVMCI 5687 5688 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 5689 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5690 // cnt - number of qwords (8-byte words). 5691 // base - start address, qword aligned. 
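  // Strategy sketch: zero one vector register, store 64 bytes per loop
  // iteration using the widest vector the CPU allows, then mop up the
  // remaining < 64 bytes (masked stores on AVX-512, 32/8-byte stores otherwise).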
5692 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 5693 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 5694 if (use64byteVector) { 5695 vpxor(xtmp, xtmp, xtmp, AVX_512bit); 5696 } else if (MaxVectorSize >= 32) { 5697 vpxor(xtmp, xtmp, xtmp, AVX_256bit); 5698 } else { 5699 pxor(xtmp, xtmp); 5700 } 5701 jmp(L_zero_64_bytes); 5702 5703 BIND(L_loop); 5704 if (MaxVectorSize >= 32) { 5705 fill64(base, 0, xtmp, use64byteVector); 5706 } else { 5707 movdqu(Address(base, 0), xtmp); 5708 movdqu(Address(base, 16), xtmp); 5709 movdqu(Address(base, 32), xtmp); 5710 movdqu(Address(base, 48), xtmp); 5711 } 5712 addptr(base, 64); 5713 5714 BIND(L_zero_64_bytes); 5715 subptr(cnt, 8); 5716 jccb(Assembler::greaterEqual, L_loop); 5717 5718 // Copy trailing 64 bytes 5719 if (use64byteVector) { 5720 addptr(cnt, 8); 5721 jccb(Assembler::equal, L_end); 5722 fill64_masked(3, base, 0, xtmp, mask, cnt, rtmp, true); 5723 jmp(L_end); 5724 } else { 5725 addptr(cnt, 4); 5726 jccb(Assembler::less, L_tail); 5727 if (MaxVectorSize >= 32) { 5728 vmovdqu(Address(base, 0), xtmp); 5729 } else { 5730 movdqu(Address(base, 0), xtmp); 5731 movdqu(Address(base, 16), xtmp); 5732 } 5733 } 5734 addptr(base, 32); 5735 subptr(cnt, 4); 5736 5737 BIND(L_tail); 5738 addptr(cnt, 4); 5739 jccb(Assembler::lessEqual, L_end); 5740 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 5741 fill32_masked(3, base, 0, xtmp, mask, cnt, rtmp); 5742 } else { 5743 decrement(cnt); 5744 5745 BIND(L_sloop); 5746 movq(Address(base, 0), xtmp); 5747 addptr(base, 8); 5748 decrement(cnt); 5749 jccb(Assembler::greaterEqual, L_sloop); 5750 } 5751 BIND(L_end); 5752 } 5753 5754 // Clearing constant sized memory using YMM/ZMM registers. 5755 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 5756 assert(UseAVX > 2 && VM_Version::supports_avx512vlbw(), ""); 5757 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 5758 5759 int vector64_count = (cnt & (~0x7)) >> 3; 5760 cnt = cnt & 0x7; 5761 const int fill64_per_loop = 4; 5762 const int max_unrolled_fill64 = 8; 5763 5764 // 64 byte initialization loop. 5765 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 5766 int start64 = 0; 5767 if (vector64_count > max_unrolled_fill64) { 5768 Label LOOP; 5769 Register index = rtmp; 5770 5771 start64 = vector64_count - (vector64_count % fill64_per_loop); 5772 5773 movl(index, 0); 5774 BIND(LOOP); 5775 for (int i = 0; i < fill64_per_loop; i++) { 5776 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 5777 } 5778 addl(index, fill64_per_loop * 64); 5779 cmpl(index, start64 * 64); 5780 jccb(Assembler::less, LOOP); 5781 } 5782 for (int i = start64; i < vector64_count; i++) { 5783 fill64(base, i * 64, xtmp, use64byteVector); 5784 } 5785 5786 // Clear remaining 64 byte tail. 
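  // Example (assuming AVX-512 masked stores are usable): with cnt == 3 qwords
  // left, the k-mask 0b111 selects three lanes of a single 256-bit store;
  // cnt == 5..7 use one masked 512-bit store when use64byteVector is set.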
5787 int disp = vector64_count * 64; 5788 if (cnt) { 5789 switch (cnt) { 5790 case 1: 5791 movq(Address(base, disp), xtmp); 5792 break; 5793 case 2: 5794 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 5795 break; 5796 case 3: 5797 movl(rtmp, 0x7); 5798 kmovwl(mask, rtmp); 5799 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 5800 break; 5801 case 4: 5802 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5803 break; 5804 case 5: 5805 if (use64byteVector) { 5806 movl(rtmp, 0x1F); 5807 kmovwl(mask, rtmp); 5808 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5809 } else { 5810 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5811 movq(Address(base, disp + 32), xtmp); 5812 } 5813 break; 5814 case 6: 5815 if (use64byteVector) { 5816 movl(rtmp, 0x3F); 5817 kmovwl(mask, rtmp); 5818 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5819 } else { 5820 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5821 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 5822 } 5823 break; 5824 case 7: 5825 if (use64byteVector) { 5826 movl(rtmp, 0x7F); 5827 kmovwl(mask, rtmp); 5828 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 5829 } else { 5830 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 5831 movl(rtmp, 0x7); 5832 kmovwl(mask, rtmp); 5833 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 5834 } 5835 break; 5836 default: 5837 fatal("Unexpected length : %d\n",cnt); 5838 break; 5839 } 5840 } 5841 } 5842 5843 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, 5844 bool is_large, KRegister mask) { 5845 // cnt - number of qwords (8-byte words). 5846 // base - start address, qword aligned. 
5847 // is_large - if optimizers know cnt is larger than InitArrayShortSize 5848 assert(base==rdi, "base register must be edi for rep stos"); 5849 assert(tmp==rax, "tmp register must be eax for rep stos"); 5850 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 5851 assert(InitArrayShortSize % BytesPerLong == 0, 5852 "InitArrayShortSize should be the multiple of BytesPerLong"); 5853 5854 Label DONE; 5855 if (!is_large || !UseXMMForObjInit) { 5856 xorptr(tmp, tmp); 5857 } 5858 5859 if (!is_large) { 5860 Label LOOP, LONG; 5861 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 5862 jccb(Assembler::greater, LONG); 5863 5864 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 5865 5866 decrement(cnt); 5867 jccb(Assembler::negative, DONE); // Zero length 5868 5869 // Use individual pointer-sized stores for small counts: 5870 BIND(LOOP); 5871 movptr(Address(base, cnt, Address::times_ptr), tmp); 5872 decrement(cnt); 5873 jccb(Assembler::greaterEqual, LOOP); 5874 jmpb(DONE); 5875 5876 BIND(LONG); 5877 } 5878 5879 // Use longer rep-prefixed ops for non-small counts: 5880 if (UseFastStosb) { 5881 shlptr(cnt, 3); // convert to number of bytes 5882 rep_stosb(); 5883 } else if (UseXMMForObjInit) { 5884 xmm_clear_mem(base, cnt, tmp, xtmp, mask); 5885 } else { 5886 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 5887 rep_stos(); 5888 } 5889 5890 BIND(DONE); 5891 } 5892 5893 #endif //COMPILER2_OR_JVMCI 5894 5895 5896 void MacroAssembler::generate_fill(BasicType t, bool aligned, 5897 Register to, Register value, Register count, 5898 Register rtmp, XMMRegister xtmp) { 5899 ShortBranchVerifier sbv(this); 5900 assert_different_registers(to, value, count, rtmp); 5901 Label L_exit; 5902 Label L_fill_2_bytes, L_fill_4_bytes; 5903 5904 #if defined(COMPILER2) && defined(_LP64) 5905 if(MaxVectorSize >=32 && 5906 VM_Version::supports_avx512vlbw() && 5907 VM_Version::supports_bmi2()) { 5908 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 5909 return; 5910 } 5911 #endif 5912 5913 int shift = -1; 5914 switch (t) { 5915 case T_BYTE: 5916 shift = 2; 5917 break; 5918 case T_SHORT: 5919 shift = 1; 5920 break; 5921 case T_INT: 5922 shift = 0; 5923 break; 5924 default: ShouldNotReachHere(); 5925 } 5926 5927 if (t == T_BYTE) { 5928 andl(value, 0xff); 5929 movl(rtmp, value); 5930 shll(rtmp, 8); 5931 orl(value, rtmp); 5932 } 5933 if (t == T_SHORT) { 5934 andl(value, 0xffff); 5935 } 5936 if (t == T_BYTE || t == T_SHORT) { 5937 movl(rtmp, value); 5938 shll(rtmp, 16); 5939 orl(value, rtmp); 5940 } 5941 5942 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 5943 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 5944 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 5945 Label L_skip_align2; 5946 // align source address at 4 bytes address boundary 5947 if (t == T_BYTE) { 5948 Label L_skip_align1; 5949 // One byte misalignment happens only for byte arrays 5950 testptr(to, 1); 5951 jccb(Assembler::zero, L_skip_align1); 5952 movb(Address(to, 0), value); 5953 increment(to); 5954 decrement(count); 5955 BIND(L_skip_align1); 5956 } 5957 // Two bytes misalignment happens only for byte and short (char) arrays 5958 testptr(to, 2); 5959 jccb(Assembler::zero, L_skip_align2); 5960 movw(Address(to, 0), value); 5961 addptr(to, 2); 5962 subl(count, 1<<(shift-1)); 5963 BIND(L_skip_align2); 5964 } 5965 if (UseSSE < 2) { 5966 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 5967 // Fill 32-byte chunks 
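    // count is in elements, so "8 << shift" is always 32 bytes' worth:
    // 32 T_BYTE (shift 2), 16 T_SHORT (shift 1) or 8 T_INT (shift 0) elements.
    // value was already widened to a full 32-bit pattern above, e.g. a byte
    // 0x41 becomes 0x41414141, so each movl below fills four bytes.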
5968 subl(count, 8 << shift); 5969 jcc(Assembler::less, L_check_fill_8_bytes); 5970 align(16); 5971 5972 BIND(L_fill_32_bytes_loop); 5973 5974 for (int i = 0; i < 32; i += 4) { 5975 movl(Address(to, i), value); 5976 } 5977 5978 addptr(to, 32); 5979 subl(count, 8 << shift); 5980 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 5981 BIND(L_check_fill_8_bytes); 5982 addl(count, 8 << shift); 5983 jccb(Assembler::zero, L_exit); 5984 jmpb(L_fill_8_bytes); 5985 5986 // 5987 // length is too short, just fill qwords 5988 // 5989 BIND(L_fill_8_bytes_loop); 5990 movl(Address(to, 0), value); 5991 movl(Address(to, 4), value); 5992 addptr(to, 8); 5993 BIND(L_fill_8_bytes); 5994 subl(count, 1 << (shift + 1)); 5995 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 5996 // fall through to fill 4 bytes 5997 } else { 5998 Label L_fill_32_bytes; 5999 if (!UseUnalignedLoadStores) { 6000 // align to 8 bytes, we know we are 4 byte aligned to start 6001 testptr(to, 4); 6002 jccb(Assembler::zero, L_fill_32_bytes); 6003 movl(Address(to, 0), value); 6004 addptr(to, 4); 6005 subl(count, 1<<shift); 6006 } 6007 BIND(L_fill_32_bytes); 6008 { 6009 assert( UseSSE >= 2, "supported cpu only" ); 6010 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 6011 movdl(xtmp, value); 6012 if (UseAVX >= 2 && UseUnalignedLoadStores) { 6013 Label L_check_fill_32_bytes; 6014 if (UseAVX > 2) { 6015 // Fill 64-byte chunks 6016 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 6017 6018 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 6019 cmpl(count, VM_Version::avx3_threshold()); 6020 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 6021 6022 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 6023 6024 subl(count, 16 << shift); 6025 jccb(Assembler::less, L_check_fill_32_bytes); 6026 align(16); 6027 6028 BIND(L_fill_64_bytes_loop_avx3); 6029 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 6030 addptr(to, 64); 6031 subl(count, 16 << shift); 6032 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 6033 jmpb(L_check_fill_32_bytes); 6034 6035 BIND(L_check_fill_64_bytes_avx2); 6036 } 6037 // Fill 64-byte chunks 6038 Label L_fill_64_bytes_loop; 6039 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 6040 6041 subl(count, 16 << shift); 6042 jcc(Assembler::less, L_check_fill_32_bytes); 6043 align(16); 6044 6045 BIND(L_fill_64_bytes_loop); 6046 vmovdqu(Address(to, 0), xtmp); 6047 vmovdqu(Address(to, 32), xtmp); 6048 addptr(to, 64); 6049 subl(count, 16 << shift); 6050 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 6051 6052 BIND(L_check_fill_32_bytes); 6053 addl(count, 8 << shift); 6054 jccb(Assembler::less, L_check_fill_8_bytes); 6055 vmovdqu(Address(to, 0), xtmp); 6056 addptr(to, 32); 6057 subl(count, 8 << shift); 6058 6059 BIND(L_check_fill_8_bytes); 6060 // clean upper bits of YMM registers 6061 movdl(xtmp, value); 6062 pshufd(xtmp, xtmp, 0); 6063 } else { 6064 // Fill 32-byte chunks 6065 pshufd(xtmp, xtmp, 0); 6066 6067 subl(count, 8 << shift); 6068 jcc(Assembler::less, L_check_fill_8_bytes); 6069 align(16); 6070 6071 BIND(L_fill_32_bytes_loop); 6072 6073 if (UseUnalignedLoadStores) { 6074 movdqu(Address(to, 0), xtmp); 6075 movdqu(Address(to, 16), xtmp); 6076 } else { 6077 movq(Address(to, 0), xtmp); 6078 movq(Address(to, 8), xtmp); 6079 movq(Address(to, 16), xtmp); 6080 movq(Address(to, 24), xtmp); 6081 } 6082 6083 addptr(to, 32); 6084 subl(count, 8 << shift); 6085 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 6086 6087 
BIND(L_check_fill_8_bytes); 6088 } 6089 addl(count, 8 << shift); 6090 jccb(Assembler::zero, L_exit); 6091 jmpb(L_fill_8_bytes); 6092 6093 // 6094 // length is too short, just fill qwords 6095 // 6096 BIND(L_fill_8_bytes_loop); 6097 movq(Address(to, 0), xtmp); 6098 addptr(to, 8); 6099 BIND(L_fill_8_bytes); 6100 subl(count, 1 << (shift + 1)); 6101 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 6102 } 6103 } 6104 // fill trailing 4 bytes 6105 BIND(L_fill_4_bytes); 6106 testl(count, 1<<shift); 6107 jccb(Assembler::zero, L_fill_2_bytes); 6108 movl(Address(to, 0), value); 6109 if (t == T_BYTE || t == T_SHORT) { 6110 Label L_fill_byte; 6111 addptr(to, 4); 6112 BIND(L_fill_2_bytes); 6113 // fill trailing 2 bytes 6114 testl(count, 1<<(shift-1)); 6115 jccb(Assembler::zero, L_fill_byte); 6116 movw(Address(to, 0), value); 6117 if (t == T_BYTE) { 6118 addptr(to, 2); 6119 BIND(L_fill_byte); 6120 // fill trailing byte 6121 testl(count, 1); 6122 jccb(Assembler::zero, L_exit); 6123 movb(Address(to, 0), value); 6124 } else { 6125 BIND(L_fill_byte); 6126 } 6127 } else { 6128 BIND(L_fill_2_bytes); 6129 } 6130 BIND(L_exit); 6131 } 6132 6133 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 6134 switch(type) { 6135 case T_BYTE: 6136 case T_BOOLEAN: 6137 evpbroadcastb(dst, src, vector_len); 6138 break; 6139 case T_SHORT: 6140 case T_CHAR: 6141 evpbroadcastw(dst, src, vector_len); 6142 break; 6143 case T_INT: 6144 case T_FLOAT: 6145 evpbroadcastd(dst, src, vector_len); 6146 break; 6147 case T_LONG: 6148 case T_DOUBLE: 6149 evpbroadcastq(dst, src, vector_len); 6150 break; 6151 default: 6152 fatal("Unhandled type : %s", type2name(type)); 6153 break; 6154 } 6155 } 6156 6157 // encode char[] to byte[] in ISO_8859_1 or ASCII 6158 //@IntrinsicCandidate 6159 //private static int implEncodeISOArray(byte[] sa, int sp, 6160 //byte[] da, int dp, int len) { 6161 // int i = 0; 6162 // for (; i < len; i++) { 6163 // char c = StringUTF16.getChar(sa, sp++); 6164 // if (c > '\u00FF') 6165 // break; 6166 // da[dp++] = (byte)c; 6167 // } 6168 // return i; 6169 //} 6170 // 6171 //@IntrinsicCandidate 6172 //private static int implEncodeAsciiArray(char[] sa, int sp, 6173 // byte[] da, int dp, int len) { 6174 // int i = 0; 6175 // for (; i < len; i++) { 6176 // char c = sa[sp++]; 6177 // if (c >= '\u0080') 6178 // break; 6179 // da[dp++] = (byte)c; 6180 // } 6181 // return i; 6182 //} 6183 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 6184 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 6185 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 6186 Register tmp5, Register result, bool ascii) { 6187 6188 // rsi: src 6189 // rdi: dst 6190 // rdx: len 6191 // rcx: tmp5 6192 // rax: result 6193 ShortBranchVerifier sbv(this); 6194 assert_different_registers(src, dst, len, tmp5, result); 6195 Label L_done, L_copy_1_char, L_copy_1_char_exit; 6196 6197 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 6198 int short_mask = ascii ? 
0xff80 : 0xff00; 6199 6200 // set result 6201 xorl(result, result); 6202 // check for zero length 6203 testl(len, len); 6204 jcc(Assembler::zero, L_done); 6205 6206 movl(result, len); 6207 6208 // Setup pointers 6209 lea(src, Address(src, len, Address::times_2)); // char[] 6210 lea(dst, Address(dst, len, Address::times_1)); // byte[] 6211 negptr(len); 6212 6213 if (UseSSE42Intrinsics || UseAVX >= 2) { 6214 Label L_copy_8_chars, L_copy_8_chars_exit; 6215 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 6216 6217 if (UseAVX >= 2) { 6218 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 6219 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6220 movdl(tmp1Reg, tmp5); 6221 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 6222 jmp(L_chars_32_check); 6223 6224 bind(L_copy_32_chars); 6225 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 6226 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 6227 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6228 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6229 jccb(Assembler::notZero, L_copy_32_chars_exit); 6230 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 6231 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 6232 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 6233 6234 bind(L_chars_32_check); 6235 addptr(len, 32); 6236 jcc(Assembler::lessEqual, L_copy_32_chars); 6237 6238 bind(L_copy_32_chars_exit); 6239 subptr(len, 16); 6240 jccb(Assembler::greater, L_copy_16_chars_exit); 6241 6242 } else if (UseSSE42Intrinsics) { 6243 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 6244 movdl(tmp1Reg, tmp5); 6245 pshufd(tmp1Reg, tmp1Reg, 0); 6246 jmpb(L_chars_16_check); 6247 } 6248 6249 bind(L_copy_16_chars); 6250 if (UseAVX >= 2) { 6251 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 6252 vptest(tmp2Reg, tmp1Reg); 6253 jcc(Assembler::notZero, L_copy_16_chars_exit); 6254 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 6255 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 6256 } else { 6257 if (UseAVX > 0) { 6258 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6259 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6260 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 6261 } else { 6262 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 6263 por(tmp2Reg, tmp3Reg); 6264 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 6265 por(tmp2Reg, tmp4Reg); 6266 } 6267 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 6268 jccb(Assembler::notZero, L_copy_16_chars_exit); 6269 packuswb(tmp3Reg, tmp4Reg); 6270 } 6271 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 6272 6273 bind(L_chars_16_check); 6274 addptr(len, 16); 6275 jcc(Assembler::lessEqual, L_copy_16_chars); 6276 6277 bind(L_copy_16_chars_exit); 6278 if (UseAVX >= 2) { 6279 // clean upper bits of YMM registers 6280 vpxor(tmp2Reg, tmp2Reg); 6281 vpxor(tmp3Reg, tmp3Reg); 6282 vpxor(tmp4Reg, tmp4Reg); 6283 movdl(tmp1Reg, tmp5); 6284 pshufd(tmp1Reg, tmp1Reg, 0); 6285 } 6286 subptr(len, 8); 6287 jccb(Assembler::greater, L_copy_8_chars_exit); 6288 6289 bind(L_copy_8_chars); 6290 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 6291 ptest(tmp3Reg, tmp1Reg); 6292 jccb(Assembler::notZero, L_copy_8_chars_exit); 6293 packuswb(tmp3Reg, tmp1Reg); 6294 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 6295 addptr(len, 8); 6296 
jccb(Assembler::lessEqual, L_copy_8_chars); 6297 6298 bind(L_copy_8_chars_exit); 6299 subptr(len, 8); 6300 jccb(Assembler::zero, L_done); 6301 } 6302 6303 bind(L_copy_1_char); 6304 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 6305 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 6306 jccb(Assembler::notZero, L_copy_1_char_exit); 6307 movb(Address(dst, len, Address::times_1, 0), tmp5); 6308 addptr(len, 1); 6309 jccb(Assembler::less, L_copy_1_char); 6310 6311 bind(L_copy_1_char_exit); 6312 addptr(result, len); // len is negative count of not processed elements 6313 6314 bind(L_done); 6315 } 6316 6317 #ifdef _LP64 6318 /** 6319 * Helper for multiply_to_len(). 6320 */ 6321 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 6322 addq(dest_lo, src1); 6323 adcq(dest_hi, 0); 6324 addq(dest_lo, src2); 6325 adcq(dest_hi, 0); 6326 } 6327 6328 /** 6329 * Multiply 64 bit by 64 bit first loop. 6330 */ 6331 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 6332 Register y, Register y_idx, Register z, 6333 Register carry, Register product, 6334 Register idx, Register kdx) { 6335 // 6336 // jlong carry, x[], y[], z[]; 6337 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6338 // huge_128 product = y[idx] * x[xstart] + carry; 6339 // z[kdx] = (jlong)product; 6340 // carry = (jlong)(product >>> 64); 6341 // } 6342 // z[xstart] = carry; 6343 // 6344 6345 Label L_first_loop, L_first_loop_exit; 6346 Label L_one_x, L_one_y, L_multiply; 6347 6348 decrementl(xstart); 6349 jcc(Assembler::negative, L_one_x); 6350 6351 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6352 rorq(x_xstart, 32); // convert big-endian to little-endian 6353 6354 bind(L_first_loop); 6355 decrementl(idx); 6356 jcc(Assembler::negative, L_first_loop_exit); 6357 decrementl(idx); 6358 jcc(Assembler::negative, L_one_y); 6359 movq(y_idx, Address(y, idx, Address::times_4, 0)); 6360 rorq(y_idx, 32); // convert big-endian to little-endian 6361 bind(L_multiply); 6362 movq(product, x_xstart); 6363 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 6364 addq(product, carry); 6365 adcq(rdx, 0); 6366 subl(kdx, 2); 6367 movl(Address(z, kdx, Address::times_4, 4), product); 6368 shrq(product, 32); 6369 movl(Address(z, kdx, Address::times_4, 0), product); 6370 movq(carry, rdx); 6371 jmp(L_first_loop); 6372 6373 bind(L_one_y); 6374 movl(y_idx, Address(y, 0)); 6375 jmp(L_multiply); 6376 6377 bind(L_one_x); 6378 movl(x_xstart, Address(x, 0)); 6379 jmp(L_first_loop); 6380 6381 bind(L_first_loop_exit); 6382 } 6383 6384 /** 6385 * Multiply 64 bit by 64 bit and add 128 bit. 
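 *
 * Roughly: rdx:rax = x_xstart * y[idx]; z[idx] and the incoming carry are
 * added in, the low 64 bits go back to z as two big-endian 32-bit halves,
 * and the high 64 bits are left in rdx as the new carry for the caller.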
6386 */ 6387 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 6388 Register yz_idx, Register idx, 6389 Register carry, Register product, int offset) { 6390 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6391 // z[kdx] = (jlong)product; 6392 6393 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 6394 rorq(yz_idx, 32); // convert big-endian to little-endian 6395 movq(product, x_xstart); 6396 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6397 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 6398 rorq(yz_idx, 32); // convert big-endian to little-endian 6399 6400 add2_with_carry(rdx, product, carry, yz_idx); 6401 6402 movl(Address(z, idx, Address::times_4, offset+4), product); 6403 shrq(product, 32); 6404 movl(Address(z, idx, Address::times_4, offset), product); 6405 6406 } 6407 6408 /** 6409 * Multiply 128 bit by 128 bit. Unrolled inner loop. 6410 */ 6411 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 6412 Register yz_idx, Register idx, Register jdx, 6413 Register carry, Register product, 6414 Register carry2) { 6415 // jlong carry, x[], y[], z[]; 6416 // int kdx = ystart+1; 6417 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6418 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6419 // z[kdx+idx+1] = (jlong)product; 6420 // jlong carry2 = (jlong)(product >>> 64); 6421 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6422 // z[kdx+idx] = (jlong)product; 6423 // carry = (jlong)(product >>> 64); 6424 // } 6425 // idx += 2; 6426 // if (idx > 0) { 6427 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6428 // z[kdx+idx] = (jlong)product; 6429 // carry = (jlong)(product >>> 64); 6430 // } 6431 // 6432 6433 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6434 6435 movl(jdx, idx); 6436 andl(jdx, 0xFFFFFFFC); 6437 shrl(jdx, 2); 6438 6439 bind(L_third_loop); 6440 subl(jdx, 1); 6441 jcc(Assembler::negative, L_third_loop_exit); 6442 subl(idx, 4); 6443 6444 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6445 movq(carry2, rdx); 6446 6447 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6448 movq(carry, rdx); 6449 jmp(L_third_loop); 6450 6451 bind (L_third_loop_exit); 6452 6453 andl (idx, 0x3); 6454 jcc(Assembler::zero, L_post_third_loop_done); 6455 6456 Label L_check_1; 6457 subl(idx, 2); 6458 jcc(Assembler::negative, L_check_1); 6459 6460 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6461 movq(carry, rdx); 6462 6463 bind (L_check_1); 6464 addl (idx, 0x2); 6465 andl (idx, 0x1); 6466 subl(idx, 1); 6467 jcc(Assembler::negative, L_post_third_loop_done); 6468 6469 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 6470 movq(product, x_xstart); 6471 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 6472 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 6473 6474 add2_with_carry(rdx, product, yz_idx, carry); 6475 6476 movl(Address(z, idx, Address::times_4, 0), product); 6477 shrq(product, 32); 6478 6479 shlq(rdx, 32); 6480 orq(product, rdx); 6481 movq(carry, product); 6482 6483 bind(L_post_third_loop_done); 6484 } 6485 6486 /** 6487 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
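 *
 * Same inner loop as multiply_128_x_128_loop above, but the multiplier stays
 * in rdx so MULX can be used, and ADCX/ADOX (when the CPU supports ADX)
 * maintain two independent carry chains per iteration.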
6488 * 6489 */ 6490 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 6491 Register carry, Register carry2, 6492 Register idx, Register jdx, 6493 Register yz_idx1, Register yz_idx2, 6494 Register tmp, Register tmp3, Register tmp4) { 6495 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 6496 6497 // jlong carry, x[], y[], z[]; 6498 // int kdx = ystart+1; 6499 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6500 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 6501 // jlong carry2 = (jlong)(tmp3 >>> 64); 6502 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 6503 // carry = (jlong)(tmp4 >>> 64); 6504 // z[kdx+idx+1] = (jlong)tmp3; 6505 // z[kdx+idx] = (jlong)tmp4; 6506 // } 6507 // idx += 2; 6508 // if (idx > 0) { 6509 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 6510 // z[kdx+idx] = (jlong)yz_idx1; 6511 // carry = (jlong)(yz_idx1 >>> 64); 6512 // } 6513 // 6514 6515 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6516 6517 movl(jdx, idx); 6518 andl(jdx, 0xFFFFFFFC); 6519 shrl(jdx, 2); 6520 6521 bind(L_third_loop); 6522 subl(jdx, 1); 6523 jcc(Assembler::negative, L_third_loop_exit); 6524 subl(idx, 4); 6525 6526 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 6527 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 6528 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 6529 rorxq(yz_idx2, yz_idx2, 32); 6530 6531 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6532 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 6533 6534 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 6535 rorxq(yz_idx1, yz_idx1, 32); 6536 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6537 rorxq(yz_idx2, yz_idx2, 32); 6538 6539 if (VM_Version::supports_adx()) { 6540 adcxq(tmp3, carry); 6541 adoxq(tmp3, yz_idx1); 6542 6543 adcxq(tmp4, tmp); 6544 adoxq(tmp4, yz_idx2); 6545 6546 movl(carry, 0); // does not affect flags 6547 adcxq(carry2, carry); 6548 adoxq(carry2, carry); 6549 } else { 6550 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 6551 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 6552 } 6553 movq(carry, carry2); 6554 6555 movl(Address(z, idx, Address::times_4, 12), tmp3); 6556 shrq(tmp3, 32); 6557 movl(Address(z, idx, Address::times_4, 8), tmp3); 6558 6559 movl(Address(z, idx, Address::times_4, 4), tmp4); 6560 shrq(tmp4, 32); 6561 movl(Address(z, idx, Address::times_4, 0), tmp4); 6562 6563 jmp(L_third_loop); 6564 6565 bind (L_third_loop_exit); 6566 6567 andl (idx, 0x3); 6568 jcc(Assembler::zero, L_post_third_loop_done); 6569 6570 Label L_check_1; 6571 subl(idx, 2); 6572 jcc(Assembler::negative, L_check_1); 6573 6574 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 6575 rorxq(yz_idx1, yz_idx1, 32); 6576 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 6577 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 6578 rorxq(yz_idx2, yz_idx2, 32); 6579 6580 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 6581 6582 movl(Address(z, idx, Address::times_4, 4), tmp3); 6583 shrq(tmp3, 32); 6584 movl(Address(z, idx, Address::times_4, 0), tmp3); 6585 movq(carry, tmp4); 6586 6587 bind (L_check_1); 6588 addl (idx, 0x2); 6589 andl (idx, 0x1); 6590 subl(idx, 1); 6591 jcc(Assembler::negative, L_post_third_loop_done); 6592 movl(tmp4, Address(y, idx, Address::times_4, 0)); 6593 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 6594 movl(tmp4, Address(z, idx, Address::times_4, 0)); 6595 6596 add2_with_carry(carry2, tmp3, tmp4, carry); 6597 6598 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 6599 shrq(tmp3, 32); 6600 6601 shlq(carry2, 32); 6602 orq(tmp3, carry2); 6603 movq(carry, tmp3); 6604 6605 bind(L_post_third_loop_done); 6606 } 6607 6608 /** 6609 * Code for BigInteger::multiplyToLen() intrinsic. 6610 * 6611 * rdi: x 6612 * rax: xlen 6613 * rsi: y 6614 * rcx: ylen 6615 * r8: z 6616 * r11: zlen 6617 * r12: tmp1 6618 * r13: tmp2 6619 * r14: tmp3 6620 * r15: tmp4 6621 * rbx: tmp5 6622 * 6623 */ 6624 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, 6625 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 6626 ShortBranchVerifier sbv(this); 6627 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 6628 6629 push(tmp1); 6630 push(tmp2); 6631 push(tmp3); 6632 push(tmp4); 6633 push(tmp5); 6634 6635 push(xlen); 6636 push(zlen); 6637 6638 const Register idx = tmp1; 6639 const Register kdx = tmp2; 6640 const Register xstart = tmp3; 6641 6642 const Register y_idx = tmp4; 6643 const Register carry = tmp5; 6644 const Register product = xlen; 6645 const Register x_xstart = zlen; // reuse register 6646 6647 // First Loop. 6648 // 6649 // final static long LONG_MASK = 0xffffffffL; 6650 // int xstart = xlen - 1; 6651 // int ystart = ylen - 1; 6652 // long carry = 0; 6653 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6654 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6655 // z[kdx] = (int)product; 6656 // carry = product >>> 32; 6657 // } 6658 // z[xstart] = (int)carry; 6659 // 6660 6661 movl(idx, ylen); // idx = ylen; 6662 movl(kdx, zlen); // kdx = xlen+ylen; 6663 xorq(carry, carry); // carry = 0; 6664 6665 Label L_done; 6666 6667 movl(xstart, xlen); 6668 decrementl(xstart); 6669 jcc(Assembler::negative, L_done); 6670 6671 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6672 6673 Label L_second_loop; 6674 testl(kdx, kdx); 6675 jcc(Assembler::zero, L_second_loop); 6676 6677 Label L_carry; 6678 subl(kdx, 1); 6679 jcc(Assembler::zero, L_carry); 6680 6681 movl(Address(z, kdx, Address::times_4, 0), carry); 6682 shrq(carry, 32); 6683 subl(kdx, 1); 6684 6685 bind(L_carry); 6686 movl(Address(z, kdx, Address::times_4, 0), carry); 6687 6688 // Second and third (nested) loops. 
6689 // 6690 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6691 // carry = 0; 6692 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6693 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6694 // (z[k] & LONG_MASK) + carry; 6695 // z[k] = (int)product; 6696 // carry = product >>> 32; 6697 // } 6698 // z[i] = (int)carry; 6699 // } 6700 // 6701 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 6702 6703 const Register jdx = tmp1; 6704 6705 bind(L_second_loop); 6706 xorl(carry, carry); // carry = 0; 6707 movl(jdx, ylen); // j = ystart+1 6708 6709 subl(xstart, 1); // i = xstart-1; 6710 jcc(Assembler::negative, L_done); 6711 6712 push (z); 6713 6714 Label L_last_x; 6715 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 6716 subl(xstart, 1); // i = xstart-1; 6717 jcc(Assembler::negative, L_last_x); 6718 6719 if (UseBMI2Instructions) { 6720 movq(rdx, Address(x, xstart, Address::times_4, 0)); 6721 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 6722 } else { 6723 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 6724 rorq(x_xstart, 32); // convert big-endian to little-endian 6725 } 6726 6727 Label L_third_loop_prologue; 6728 bind(L_third_loop_prologue); 6729 6730 push (x); 6731 push (xstart); 6732 push (ylen); 6733 6734 6735 if (UseBMI2Instructions) { 6736 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 6737 } else { // !UseBMI2Instructions 6738 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6739 } 6740 6741 pop(ylen); 6742 pop(xlen); 6743 pop(x); 6744 pop(z); 6745 6746 movl(tmp3, xlen); 6747 addl(tmp3, 1); 6748 movl(Address(z, tmp3, Address::times_4, 0), carry); 6749 subl(tmp3, 1); 6750 jccb(Assembler::negative, L_done); 6751 6752 shrq(carry, 32); 6753 movl(Address(z, tmp3, Address::times_4, 0), carry); 6754 jmp(L_second_loop); 6755 6756 // Next infrequent code is moved outside loops. 6757 bind(L_last_x); 6758 if (UseBMI2Instructions) { 6759 movl(rdx, Address(x, 0)); 6760 } else { 6761 movl(x_xstart, Address(x, 0)); 6762 } 6763 jmp(L_third_loop_prologue); 6764 6765 bind(L_done); 6766 6767 pop(zlen); 6768 pop(xlen); 6769 6770 pop(tmp5); 6771 pop(tmp4); 6772 pop(tmp3); 6773 pop(tmp2); 6774 pop(tmp1); 6775 } 6776 6777 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 6778 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 6779 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 6780 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 6781 Label VECTOR8_TAIL, VECTOR4_TAIL; 6782 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 6783 Label SAME_TILL_END, DONE; 6784 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 6785 6786 //scale is in rcx in both Win64 and Unix 6787 ShortBranchVerifier sbv(this); 6788 6789 shlq(length); 6790 xorq(result, result); 6791 6792 if ((AVX3Threshold == 0) && (UseAVX > 2) && 6793 VM_Version::supports_avx512vlbw()) { 6794 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 6795 6796 cmpq(length, 64); 6797 jcc(Assembler::less, VECTOR32_TAIL); 6798 6799 movq(tmp1, length); 6800 andq(tmp1, 0x3F); // tail count 6801 andq(length, ~(0x3F)); //vector count 6802 6803 bind(VECTOR64_LOOP); 6804 // AVX512 code to compare 64 byte vectors. 
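    // evpcmpeqb sets one bit in k7 per byte lane that compares equal;
    // kortestql sets CF only when all 64 bits are set, so the aboveEqual
    // (carryClear) branch below fires as soon as any byte differs.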
6805 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 6806 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 6807 kortestql(k7, k7); 6808 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 6809 addq(result, 64); 6810 subq(length, 64); 6811 jccb(Assembler::notZero, VECTOR64_LOOP); 6812 6813 //bind(VECTOR64_TAIL); 6814 testq(tmp1, tmp1); 6815 jcc(Assembler::zero, SAME_TILL_END); 6816 6817 //bind(VECTOR64_TAIL); 6818 // AVX512 code to compare up to 63 byte vectors. 6819 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 6820 shlxq(tmp2, tmp2, tmp1); 6821 notq(tmp2); 6822 kmovql(k3, tmp2); 6823 6824 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 6825 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 6826 6827 ktestql(k7, k3); 6828 jcc(Assembler::below, SAME_TILL_END); // not mismatch 6829 6830 bind(VECTOR64_NOT_EQUAL); 6831 kmovql(tmp1, k7); 6832 notq(tmp1); 6833 tzcntq(tmp1, tmp1); 6834 addq(result, tmp1); 6835 shrq(result); 6836 jmp(DONE); 6837 bind(VECTOR32_TAIL); 6838 } 6839 6840 cmpq(length, 8); 6841 jcc(Assembler::equal, VECTOR8_LOOP); 6842 jcc(Assembler::less, VECTOR4_TAIL); 6843 6844 if (UseAVX >= 2) { 6845 Label VECTOR16_TAIL, VECTOR32_LOOP; 6846 6847 cmpq(length, 16); 6848 jcc(Assembler::equal, VECTOR16_LOOP); 6849 jcc(Assembler::less, VECTOR8_LOOP); 6850 6851 cmpq(length, 32); 6852 jccb(Assembler::less, VECTOR16_TAIL); 6853 6854 subq(length, 32); 6855 bind(VECTOR32_LOOP); 6856 vmovdqu(rymm0, Address(obja, result)); 6857 vmovdqu(rymm1, Address(objb, result)); 6858 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 6859 vptest(rymm2, rymm2); 6860 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 6861 addq(result, 32); 6862 subq(length, 32); 6863 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 6864 addq(length, 32); 6865 jcc(Assembler::equal, SAME_TILL_END); 6866 //falling through if less than 32 bytes left //close the branch here. 
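// Added note (not in the original): once fewer than 32 bytes remain, the code below
// cascades through progressively smaller tails -- one 16-byte compare, one 8-byte,
// one 4-byte, then a byte-by-byte loop -- each falling through to the next, so a
// 29-byte remainder, for example, is checked as 16 + 8 + 4 + 1.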
6867 6868 bind(VECTOR16_TAIL); 6869 cmpq(length, 16); 6870 jccb(Assembler::less, VECTOR8_TAIL); 6871 bind(VECTOR16_LOOP); 6872 movdqu(rymm0, Address(obja, result)); 6873 movdqu(rymm1, Address(objb, result)); 6874 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 6875 ptest(rymm2, rymm2); 6876 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6877 addq(result, 16); 6878 subq(length, 16); 6879 jcc(Assembler::equal, SAME_TILL_END); 6880 //falling through if less than 16 bytes left 6881 } else {//regular intrinsics 6882 6883 cmpq(length, 16); 6884 jccb(Assembler::less, VECTOR8_TAIL); 6885 6886 subq(length, 16); 6887 bind(VECTOR16_LOOP); 6888 movdqu(rymm0, Address(obja, result)); 6889 movdqu(rymm1, Address(objb, result)); 6890 pxor(rymm0, rymm1); 6891 ptest(rymm0, rymm0); 6892 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 6893 addq(result, 16); 6894 subq(length, 16); 6895 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 6896 addq(length, 16); 6897 jcc(Assembler::equal, SAME_TILL_END); 6898 //falling through if less than 16 bytes left 6899 } 6900 6901 bind(VECTOR8_TAIL); 6902 cmpq(length, 8); 6903 jccb(Assembler::less, VECTOR4_TAIL); 6904 bind(VECTOR8_LOOP); 6905 movq(tmp1, Address(obja, result)); 6906 movq(tmp2, Address(objb, result)); 6907 xorq(tmp1, tmp2); 6908 testq(tmp1, tmp1); 6909 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 6910 addq(result, 8); 6911 subq(length, 8); 6912 jcc(Assembler::equal, SAME_TILL_END); 6913 //falling through if less than 8 bytes left 6914 6915 bind(VECTOR4_TAIL); 6916 cmpq(length, 4); 6917 jccb(Assembler::less, BYTES_TAIL); 6918 bind(VECTOR4_LOOP); 6919 movl(tmp1, Address(obja, result)); 6920 xorl(tmp1, Address(objb, result)); 6921 testl(tmp1, tmp1); 6922 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 6923 addq(result, 4); 6924 subq(length, 4); 6925 jcc(Assembler::equal, SAME_TILL_END); 6926 //falling through if less than 4 bytes left 6927 6928 bind(BYTES_TAIL); 6929 bind(BYTES_LOOP); 6930 load_unsigned_byte(tmp1, Address(obja, result)); 6931 load_unsigned_byte(tmp2, Address(objb, result)); 6932 xorl(tmp1, tmp2); 6933 testl(tmp1, tmp1); 6934 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6935 decq(length); 6936 jcc(Assembler::zero, SAME_TILL_END); 6937 incq(result); 6938 load_unsigned_byte(tmp1, Address(obja, result)); 6939 load_unsigned_byte(tmp2, Address(objb, result)); 6940 xorl(tmp1, tmp2); 6941 testl(tmp1, tmp1); 6942 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6943 decq(length); 6944 jcc(Assembler::zero, SAME_TILL_END); 6945 incq(result); 6946 load_unsigned_byte(tmp1, Address(obja, result)); 6947 load_unsigned_byte(tmp2, Address(objb, result)); 6948 xorl(tmp1, tmp2); 6949 testl(tmp1, tmp1); 6950 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 6951 jmp(SAME_TILL_END); 6952 6953 if (UseAVX >= 2) { 6954 bind(VECTOR32_NOT_EQUAL); 6955 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 6956 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 6957 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 6958 vpmovmskb(tmp1, rymm0); 6959 bsfq(tmp1, tmp1); 6960 addq(result, tmp1); 6961 shrq(result); 6962 jmp(DONE); 6963 } 6964 6965 bind(VECTOR16_NOT_EQUAL); 6966 if (UseAVX >= 2) { 6967 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 6968 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 6969 pxor(rymm0, rymm2); 6970 } else { 6971 pcmpeqb(rymm2, rymm2); 6972 pxor(rymm0, rymm1); 6973 pcmpeqb(rymm0, rymm1); 6974 pxor(rymm0, rymm2); 6975 } 6976 pmovmskb(tmp1, rymm0); 6977 
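// Illustrative sketch (added comment, not from the original) of how the mismatch
// index is recovered below; the names are pseudocode only:
//
//   mask            = pmovmskb(diff_bytes);        // one set bit per differing byte
//   first_diff_byte = count_trailing_zeros(mask);  // bsfq
//   result         += first_diff_byte;             // byte offset of first mismatch
//   result        >>= log2_array_indxscale;        // shrq shifts by cl (scale is in rcx)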
bsfq(tmp1, tmp1); 6978 addq(result, tmp1); 6979 shrq(result); 6980 jmpb(DONE); 6981 6982 bind(VECTOR8_NOT_EQUAL); 6983 bind(VECTOR4_NOT_EQUAL); 6984 bsfq(tmp1, tmp1); 6985 shrq(tmp1, 3); 6986 addq(result, tmp1); 6987 bind(BYTES_NOT_EQUAL); 6988 shrq(result); 6989 jmpb(DONE); 6990 6991 bind(SAME_TILL_END); 6992 mov64(result, -1); 6993 6994 bind(DONE); 6995 } 6996 6997 //Helper functions for square_to_len() 6998 6999 /** 7000 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 7001 * Preserves x and z and modifies rest of the registers. 7002 */ 7003 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7004 // Perform square and right shift by 1 7005 // Handle odd xlen case first, then for even xlen do the following 7006 // jlong carry = 0; 7007 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 7008 // huge_128 product = x[j:j+1] * x[j:j+1]; 7009 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 7010 // z[i+2:i+3] = (jlong)(product >>> 1); 7011 // carry = (jlong)product; 7012 // } 7013 7014 xorq(tmp5, tmp5); // carry 7015 xorq(rdxReg, rdxReg); 7016 xorl(tmp1, tmp1); // index for x 7017 xorl(tmp4, tmp4); // index for z 7018 7019 Label L_first_loop, L_first_loop_exit; 7020 7021 testl(xlen, 1); 7022 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 7023 7024 // Square and right shift by 1 the odd element using 32 bit multiply 7025 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 7026 imulq(raxReg, raxReg); 7027 shrq(raxReg, 1); 7028 adcq(tmp5, 0); 7029 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 7030 incrementl(tmp1); 7031 addl(tmp4, 2); 7032 7033 // Square and right shift by 1 the rest using 64 bit multiply 7034 bind(L_first_loop); 7035 cmpptr(tmp1, xlen); 7036 jccb(Assembler::equal, L_first_loop_exit); 7037 7038 // Square 7039 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 7040 rorq(raxReg, 32); // convert big-endian to little-endian 7041 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 7042 7043 // Right shift by 1 and save carry 7044 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 7045 rcrq(rdxReg, 1); 7046 rcrq(raxReg, 1); 7047 adcq(tmp5, 0); 7048 7049 // Store result in z 7050 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 7051 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 7052 7053 // Update indices for x and z 7054 addl(tmp1, 2); 7055 addl(tmp4, 4); 7056 jmp(L_first_loop); 7057 7058 bind(L_first_loop_exit); 7059 } 7060 7061 7062 /** 7063 * Perform the following multiply add operation using BMI2 instructions 7064 * carry:sum = sum + op1*op2 + carry 7065 * op2 should be in rdx 7066 * op2 is preserved, all other registers are modified 7067 */ 7068 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 7069 // assert op2 is rdx 7070 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 7071 addq(sum, carry); 7072 adcq(tmp2, 0); 7073 addq(sum, op1); 7074 adcq(tmp2, 0); 7075 movq(carry, tmp2); 7076 } 7077 7078 /** 7079 * Perform the following multiply add operation: 7080 * carry:sum = sum + op1*op2 + carry 7081 * Preserves op1, op2 and modifies rest of registers 7082 */ 7083 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 7084 // rdx:rax = op1 * op2 7085 movq(raxReg, op2); 7086 mulq(op1); 7087 7088 // rdx:rax = sum + carry + rdx:rax 7089 addq(sum, carry); 7090 
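// Added note (not in the original): this add/adc sequence accumulates a 128-bit
// result, conceptually
//
//   rdx:rax = op1 * op2;               // from the mulq above
//   rdx:sum = rdx:rax + carry + sum;   // the adc's fold the carries into rdx
//
// so on exit carry:sum == sum + op1*op2 + carry, as stated in the function header.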
adcq(rdxReg, 0); 7091 addq(sum, raxReg); 7092 adcq(rdxReg, 0); 7093 7094 // carry:sum = rdx:sum 7095 movq(carry, rdxReg); 7096 } 7097 7098 /** 7099 * Add 64 bit long carry into z[] with carry propagation. 7100 * Preserves z and carry register values and modifies rest of registers. 7101 * 7102 */ 7103 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 7104 Label L_fourth_loop, L_fourth_loop_exit; 7105 7106 movl(tmp1, 1); 7107 subl(zlen, 2); 7108 addq(Address(z, zlen, Address::times_4, 0), carry); 7109 7110 bind(L_fourth_loop); 7111 jccb(Assembler::carryClear, L_fourth_loop_exit); 7112 subl(zlen, 2); 7113 jccb(Assembler::negative, L_fourth_loop_exit); 7114 addq(Address(z, zlen, Address::times_4, 0), tmp1); 7115 jmp(L_fourth_loop); 7116 bind(L_fourth_loop_exit); 7117 } 7118 7119 /** 7120 * Shift z[] left by 1 bit. 7121 * Preserves x, len, z and zlen registers and modifies rest of the registers. 7122 * 7123 */ 7124 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 7125 7126 Label L_fifth_loop, L_fifth_loop_exit; 7127 7128 // Fifth loop 7129 // Perform primitiveLeftShift(z, zlen, 1) 7130 7131 const Register prev_carry = tmp1; 7132 const Register new_carry = tmp4; 7133 const Register value = tmp2; 7134 const Register zidx = tmp3; 7135 7136 // int zidx, carry; 7137 // long value; 7138 // carry = 0; 7139 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 7140 // (carry:value) = (z[i] << 1) | carry ; 7141 // z[i] = value; 7142 // } 7143 7144 movl(zidx, zlen); 7145 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 7146 7147 bind(L_fifth_loop); 7148 decl(zidx); // Use decl to preserve carry flag 7149 decl(zidx); 7150 jccb(Assembler::negative, L_fifth_loop_exit); 7151 7152 if (UseBMI2Instructions) { 7153 movq(value, Address(z, zidx, Address::times_4, 0)); 7154 rclq(value, 1); 7155 rorxq(value, value, 32); 7156 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7157 } 7158 else { 7159 // clear new_carry 7160 xorl(new_carry, new_carry); 7161 7162 // Shift z[i] by 1, or in previous carry and save new carry 7163 movq(value, Address(z, zidx, Address::times_4, 0)); 7164 shlq(value, 1); 7165 adcl(new_carry, 0); 7166 7167 orq(value, prev_carry); 7168 rorq(value, 0x20); 7169 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 7170 7171 // Set previous carry = new carry 7172 movl(prev_carry, new_carry); 7173 } 7174 jmp(L_fifth_loop); 7175 7176 bind(L_fifth_loop_exit); 7177 } 7178 7179 7180 /** 7181 * Code for BigInteger::squareToLen() intrinsic 7182 * 7183 * rdi: x 7184 * rsi: len 7185 * r8: z 7186 * rcx: zlen 7187 * r12: tmp1 7188 * r13: tmp2 7189 * r14: tmp3 7190 * r15: tmp4 7191 * rbx: tmp5 7192 * 7193 */ 7194 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7195 7196 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 7197 push(tmp1); 7198 push(tmp2); 7199 push(tmp3); 7200 push(tmp4); 7201 push(tmp5); 7202 7203 // First loop 7204 // Store the squares, right shifted one bit (i.e., divided by 2). 7205 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 7206 7207 // Add in off-diagonal sums. 7208 // 7209 // Second, third (nested) and fourth loops. 
7210 // zlen +=2; 7211 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 7212 // carry = 0; 7213 // long op2 = x[xidx:xidx+1]; 7214 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 7215 // k -= 2; 7216 // long op1 = x[j:j+1]; 7217 // long sum = z[k:k+1]; 7218 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 7219 // z[k:k+1] = sum; 7220 // } 7221 // add_one_64(z, k, carry, tmp_regs); 7222 // } 7223 7224 const Register carry = tmp5; 7225 const Register sum = tmp3; 7226 const Register op1 = tmp4; 7227 Register op2 = tmp2; 7228 7229 push(zlen); 7230 push(len); 7231 addl(zlen,2); 7232 bind(L_second_loop); 7233 xorq(carry, carry); 7234 subl(zlen, 4); 7235 subl(len, 2); 7236 push(zlen); 7237 push(len); 7238 cmpl(len, 0); 7239 jccb(Assembler::lessEqual, L_second_loop_exit); 7240 7241 // Multiply an array by one 64 bit long. 7242 if (UseBMI2Instructions) { 7243 op2 = rdxReg; 7244 movq(op2, Address(x, len, Address::times_4, 0)); 7245 rorxq(op2, op2, 32); 7246 } 7247 else { 7248 movq(op2, Address(x, len, Address::times_4, 0)); 7249 rorq(op2, 32); 7250 } 7251 7252 bind(L_third_loop); 7253 decrementl(len); 7254 jccb(Assembler::negative, L_third_loop_exit); 7255 decrementl(len); 7256 jccb(Assembler::negative, L_last_x); 7257 7258 movq(op1, Address(x, len, Address::times_4, 0)); 7259 rorq(op1, 32); 7260 7261 bind(L_multiply); 7262 subl(zlen, 2); 7263 movq(sum, Address(z, zlen, Address::times_4, 0)); 7264 7265 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 7266 if (UseBMI2Instructions) { 7267 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 7268 } 7269 else { 7270 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7271 } 7272 7273 movq(Address(z, zlen, Address::times_4, 0), sum); 7274 7275 jmp(L_third_loop); 7276 bind(L_third_loop_exit); 7277 7278 // Fourth loop 7279 // Add 64 bit long carry into z with carry propagation. 7280 // Uses offsetted zlen. 7281 add_one_64(z, zlen, carry, tmp1); 7282 7283 pop(len); 7284 pop(zlen); 7285 jmp(L_second_loop); 7286 7287 // Next infrequent code is moved outside loops. 7288 bind(L_last_x); 7289 movl(op1, Address(x, 0)); 7290 jmp(L_multiply); 7291 7292 bind(L_second_loop_exit); 7293 pop(len); 7294 pop(zlen); 7295 pop(len); 7296 pop(zlen); 7297 7298 // Fifth loop 7299 // Shift z left 1 bit. 7300 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 7301 7302 // z[zlen-1] |= x[len-1] & 1; 7303 movl(tmp3, Address(x, len, Address::times_4, -4)); 7304 andl(tmp3, 1); 7305 orl(Address(z, zlen, Address::times_4, -4), tmp3); 7306 7307 pop(tmp5); 7308 pop(tmp4); 7309 pop(tmp3); 7310 pop(tmp2); 7311 pop(tmp1); 7312 } 7313 7314 /** 7315 * Helper function for mul_add() 7316 * Multiply the in[] by int k and add to out[] starting at offset offs using 7317 * 128 bit by 32 bit multiply and return the carry in tmp5. 7318 * Only quad int aligned length of in[] is operated on in this function. 7319 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 7320 * This function preserves out, in and k registers. 7321 * len and offset point to the appropriate index in "in" & "out" correspondingly 7322 * tmp5 has the carry. 7323 * other registers are temporary and are modified. 
7324 * 7325 */ 7326 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 7327 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 7328 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7329 7330 Label L_first_loop, L_first_loop_exit; 7331 7332 movl(tmp1, len); 7333 shrl(tmp1, 2); 7334 7335 bind(L_first_loop); 7336 subl(tmp1, 1); 7337 jccb(Assembler::negative, L_first_loop_exit); 7338 7339 subl(len, 4); 7340 subl(offset, 4); 7341 7342 Register op2 = tmp2; 7343 const Register sum = tmp3; 7344 const Register op1 = tmp4; 7345 const Register carry = tmp5; 7346 7347 if (UseBMI2Instructions) { 7348 op2 = rdxReg; 7349 } 7350 7351 movq(op1, Address(in, len, Address::times_4, 8)); 7352 rorq(op1, 32); 7353 movq(sum, Address(out, offset, Address::times_4, 8)); 7354 rorq(sum, 32); 7355 if (UseBMI2Instructions) { 7356 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7357 } 7358 else { 7359 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7360 } 7361 // Store back in big endian from little endian 7362 rorq(sum, 0x20); 7363 movq(Address(out, offset, Address::times_4, 8), sum); 7364 7365 movq(op1, Address(in, len, Address::times_4, 0)); 7366 rorq(op1, 32); 7367 movq(sum, Address(out, offset, Address::times_4, 0)); 7368 rorq(sum, 32); 7369 if (UseBMI2Instructions) { 7370 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7371 } 7372 else { 7373 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7374 } 7375 // Store back in big endian from little endian 7376 rorq(sum, 0x20); 7377 movq(Address(out, offset, Address::times_4, 0), sum); 7378 7379 jmp(L_first_loop); 7380 bind(L_first_loop_exit); 7381 } 7382 7383 /** 7384 * Code for BigInteger::mulAdd() intrinsic 7385 * 7386 * rdi: out 7387 * rsi: in 7388 * r11: offs (out.length - offset) 7389 * rcx: len 7390 * r8: k 7391 * r12: tmp1 7392 * r13: tmp2 7393 * r14: tmp3 7394 * r15: tmp4 7395 * rbx: tmp5 7396 * Multiply the in[] by word k and add to out[], return the carry in rax 7397 */ 7398 void MacroAssembler::mul_add(Register out, Register in, Register offs, 7399 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 7400 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 7401 7402 Label L_carry, L_last_in, L_done; 7403 7404 // carry = 0; 7405 // for (int j=len-1; j >= 0; j--) { 7406 // long product = (in[j] & LONG_MASK) * kLong + 7407 // (out[offs] & LONG_MASK) + carry; 7408 // out[offs--] = (int)product; 7409 // carry = product >>> 32; 7410 // } 7411 // 7412 push(tmp1); 7413 push(tmp2); 7414 push(tmp3); 7415 push(tmp4); 7416 push(tmp5); 7417 7418 Register op2 = tmp2; 7419 const Register sum = tmp3; 7420 const Register op1 = tmp4; 7421 const Register carry = tmp5; 7422 7423 if (UseBMI2Instructions) { 7424 op2 = rdxReg; 7425 movl(op2, k); 7426 } 7427 else { 7428 movl(op2, k); 7429 } 7430 7431 xorq(carry, carry); 7432 7433 //First loop 7434 7435 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 7436 //The carry is in tmp5 7437 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 7438 7439 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 7440 decrementl(len); 7441 jccb(Assembler::negative, L_carry); 7442 decrementl(len); 7443 jccb(Assembler::negative, L_last_in); 7444 7445 movq(op1, Address(in, len, Address::times_4, 0)); 7446 rorq(op1, 32); 7447 7448 subl(offs, 2); 7449 movq(sum, Address(out, offs, Address::times_4, 0)); 7450 rorq(sum, 32); 7451 7452 if (UseBMI2Instructions) { 7453 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 7454 } 7455 else { 7456 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 7457 } 7458 7459 // Store back in big endian from little endian 7460 rorq(sum, 0x20); 7461 movq(Address(out, offs, Address::times_4, 0), sum); 7462 7463 testl(len, len); 7464 jccb(Assembler::zero, L_carry); 7465 7466 //Multiply the last in[] entry, if any 7467 bind(L_last_in); 7468 movl(op1, Address(in, 0)); 7469 movl(sum, Address(out, offs, Address::times_4, -4)); 7470 7471 movl(raxReg, k); 7472 mull(op1); //tmp4 * eax -> edx:eax 7473 addl(sum, carry); 7474 adcl(rdxReg, 0); 7475 addl(sum, raxReg); 7476 adcl(rdxReg, 0); 7477 movl(carry, rdxReg); 7478 7479 movl(Address(out, offs, Address::times_4, -4), sum); 7480 7481 bind(L_carry); 7482 //return tmp5/carry as carry in rax 7483 movl(rax, carry); 7484 7485 bind(L_done); 7486 pop(tmp5); 7487 pop(tmp4); 7488 pop(tmp3); 7489 pop(tmp2); 7490 pop(tmp1); 7491 } 7492 #endif 7493 7494 /** 7495 * Emits code to update CRC-32 with a byte value according to constants in table 7496 * 7497 * @param [in,out]crc Register containing the crc. 7498 * @param [in]val Register containing the byte to fold into the CRC. 7499 * @param [in]table Register containing the table of crc constants. 7500 * 7501 * uint32_t crc; 7502 * val = crc_table[(val ^ crc) & 0xFF]; 7503 * crc = val ^ (crc >> 8); 7504 * 7505 */ 7506 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 7507 xorl(val, crc); 7508 andl(val, 0xFF); 7509 shrl(crc, 8); // unsigned shift 7510 xorl(crc, Address(table, val, Address::times_4, 0)); 7511 } 7512 7513 /** 7514 * Fold 128-bit data chunk 7515 */ 7516 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 7517 if (UseAVX > 0) { 7518 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 7519 vpclmulldq(xcrc, xK, xcrc); // [63:0] 7520 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 7521 pxor(xcrc, xtmp); 7522 } else { 7523 movdqa(xtmp, xcrc); 7524 pclmulhdq(xtmp, xK); // [123:64] 7525 pclmulldq(xcrc, xK); // [63:0] 7526 pxor(xcrc, xtmp); 7527 movdqu(xtmp, Address(buf, offset)); 7528 pxor(xcrc, xtmp); 7529 } 7530 } 7531 7532 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 7533 if (UseAVX > 0) { 7534 vpclmulhdq(xtmp, xK, xcrc); 7535 vpclmulldq(xcrc, xK, xcrc); 7536 pxor(xcrc, xbuf); 7537 pxor(xcrc, xtmp); 7538 } else { 7539 movdqa(xtmp, xcrc); 7540 pclmulhdq(xtmp, xK); 7541 pclmulldq(xcrc, xK); 7542 pxor(xcrc, xbuf); 7543 pxor(xcrc, xtmp); 7544 } 7545 } 7546 7547 /** 7548 * 8-bit folds to compute 32-bit CRC 7549 * 7550 * uint64_t xcrc; 7551 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 7552 */ 7553 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 7554 movdl(tmp, xcrc); 7555 andl(tmp, 0xFF); 7556 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 7557 psrldq(xcrc, 1); // unsigned shift one byte 7558 pxor(xcrc, xtmp); 7559 } 7560 7561 /** 7562 * uint32_t crc; 7563 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 7564 */ 7565 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 7566 movl(tmp, crc); 7567 andl(tmp, 0xFF); 7568 shrl(crc, 8); 7569 xorl(crc, Address(table, tmp, Address::times_4, 0)); 7570 } 7571 7572 /** 7573 * @param crc register containing existing CRC (32-bit) 7574 * @param buf register pointing to input byte buffer (byte*) 7575 * @param len register containing number of bytes 7576 * 
@param table register that will contain address of CRC table 7577 * @param tmp scratch register 7578 */ 7579 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 7580 assert_different_registers(crc, buf, len, table, tmp, rax); 7581 7582 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7583 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7584 7585 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7586 // context for the registers used, where all instructions below are using 128-bit mode 7587 // On EVEX without VL and BW, these instructions will all be AVX. 7588 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 7589 notl(crc); // ~crc 7590 cmpl(len, 16); 7591 jcc(Assembler::less, L_tail); 7592 7593 // Align buffer to 16 bytes 7594 movl(tmp, buf); 7595 andl(tmp, 0xF); 7596 jccb(Assembler::zero, L_aligned); 7597 subl(tmp, 16); 7598 addl(len, tmp); 7599 7600 align(4); 7601 BIND(L_align_loop); 7602 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7603 update_byte_crc32(crc, rax, table); 7604 increment(buf); 7605 incrementl(tmp); 7606 jccb(Assembler::less, L_align_loop); 7607 7608 BIND(L_aligned); 7609 movl(tmp, len); // save 7610 shrl(len, 4); 7611 jcc(Assembler::zero, L_tail_restore); 7612 7613 // Fold crc into first bytes of vector 7614 movdqa(xmm1, Address(buf, 0)); 7615 movdl(rax, xmm1); 7616 xorl(crc, rax); 7617 if (VM_Version::supports_sse4_1()) { 7618 pinsrd(xmm1, crc, 0); 7619 } else { 7620 pinsrw(xmm1, crc, 0); 7621 shrl(crc, 16); 7622 pinsrw(xmm1, crc, 1); 7623 } 7624 addptr(buf, 16); 7625 subl(len, 4); // len > 0 7626 jcc(Assembler::less, L_fold_tail); 7627 7628 movdqa(xmm2, Address(buf, 0)); 7629 movdqa(xmm3, Address(buf, 16)); 7630 movdqa(xmm4, Address(buf, 32)); 7631 addptr(buf, 48); 7632 subl(len, 3); 7633 jcc(Assembler::lessEqual, L_fold_512b); 7634 7635 // Fold total 512 bits of polynomial on each iteration, 7636 // 128 bits per each of 4 parallel streams. 7637 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 7638 7639 align32(); 7640 BIND(L_fold_512b_loop); 7641 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7642 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 7643 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 7644 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 7645 addptr(buf, 64); 7646 subl(len, 4); 7647 jcc(Assembler::greater, L_fold_512b_loop); 7648 7649 // Fold 512 bits to 128 bits. 7650 BIND(L_fold_512b); 7651 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7652 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 7653 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 7654 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 7655 7656 // Fold the rest of 128 bits data chunks 7657 BIND(L_fold_tail); 7658 addl(len, 3); 7659 jccb(Assembler::lessEqual, L_fold_128b); 7660 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 7661 7662 BIND(L_fold_tail_loop); 7663 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 7664 addptr(buf, 16); 7665 decrementl(len); 7666 jccb(Assembler::greater, L_fold_tail_loop); 7667 7668 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
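// Rough outline of the reduction below (added comment, not from the original):
//   1. carry-less multiplies against the crc_by128_masks constants fold the
//      128-bit remainder in xmm1 down to a small value in xmm0;
//   2. four 8-bit table folds are applied while the value is still in xmm0;
//   3. the low 32 bits move to 'crc' and four more 8-bit folds finish the job.
// Each 8-bit fold computes crc = table[crc & 0xFF] ^ (crc >> 8).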
7669 BIND(L_fold_128b); 7670 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 7671 if (UseAVX > 0) { 7672 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 7673 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 7674 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 7675 } else { 7676 movdqa(xmm2, xmm0); 7677 pclmulqdq(xmm2, xmm1, 0x1); 7678 movdqa(xmm3, xmm0); 7679 pand(xmm3, xmm2); 7680 pclmulqdq(xmm0, xmm3, 0x1); 7681 } 7682 psrldq(xmm1, 8); 7683 psrldq(xmm2, 4); 7684 pxor(xmm0, xmm1); 7685 pxor(xmm0, xmm2); 7686 7687 // 8 8-bit folds to compute 32-bit CRC. 7688 for (int j = 0; j < 4; j++) { 7689 fold_8bit_crc32(xmm0, table, xmm1, rax); 7690 } 7691 movdl(crc, xmm0); // mov 32 bits to general register 7692 for (int j = 0; j < 4; j++) { 7693 fold_8bit_crc32(crc, table, rax); 7694 } 7695 7696 BIND(L_tail_restore); 7697 movl(len, tmp); // restore 7698 BIND(L_tail); 7699 andl(len, 0xf); 7700 jccb(Assembler::zero, L_exit); 7701 7702 // Fold the rest of bytes 7703 align(4); 7704 BIND(L_tail_loop); 7705 movsbl(rax, Address(buf, 0)); // load byte with sign extension 7706 update_byte_crc32(crc, rax, table); 7707 increment(buf); 7708 decrementl(len); 7709 jccb(Assembler::greater, L_tail_loop); 7710 7711 BIND(L_exit); 7712 notl(crc); // ~c 7713 } 7714 7715 #ifdef _LP64 7716 // Helper function for AVX 512 CRC32 7717 // Fold 512-bit data chunks 7718 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 7719 Register pos, int offset) { 7720 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 7721 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 7722 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 7723 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 7724 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 7725 } 7726 7727 // Helper function for AVX 512 CRC32 7728 // Compute CRC32 for < 256B buffers 7729 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 7730 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 7731 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 7732 7733 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 7734 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 7735 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 7736 7737 // check if there is enough buffer to be able to fold 16B at a time 7738 cmpl(len, 32); 7739 jcc(Assembler::less, L_less_than_32); 7740 7741 // if there is, load the constants 7742 movdqu(xmm10, Address(table, 1 * 16)); //rk1 and rk2 in xmm10 7743 movdl(xmm0, crc); // get the initial crc value 7744 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7745 pxor(xmm7, xmm0); 7746 7747 // update the buffer pointer 7748 addl(pos, 16); 7749 //update the counter.subtract 32 instead of 16 to save one instruction from the loop 7750 subl(len, 32); 7751 jmp(L_16B_reduction_loop); 7752 7753 bind(L_less_than_32); 7754 //mov initial crc to the return value. this is necessary for zero - length buffers. 
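// Added clarification (not in the original): for a zero-length buffer nothing is
// folded, so the incoming crc is copied to rax and the routine leaves through
// L_cleanup with the value unchanged.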
7755 movl(rax, crc); 7756 testl(len, len); 7757 jcc(Assembler::equal, L_cleanup); 7758 7759 movdl(xmm0, crc); //get the initial crc value 7760 7761 cmpl(len, 16); 7762 jcc(Assembler::equal, L_exact_16_left); 7763 jcc(Assembler::less, L_less_than_16_left); 7764 7765 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 7766 pxor(xmm7, xmm0); //xor the initial crc value 7767 addl(pos, 16); 7768 subl(len, 16); 7769 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 7770 jmp(L_get_last_two_xmms); 7771 7772 bind(L_less_than_16_left); 7773 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 7774 pxor(xmm1, xmm1); 7775 movptr(tmp1, rsp); 7776 movdqu(Address(tmp1, 0 * 16), xmm1); 7777 7778 cmpl(len, 4); 7779 jcc(Assembler::less, L_only_less_than_4); 7780 7781 //backup the counter value 7782 movl(tmp2, len); 7783 cmpl(len, 8); 7784 jcc(Assembler::less, L_less_than_8_left); 7785 7786 //load 8 Bytes 7787 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 7788 movq(Address(tmp1, 0 * 16), rax); 7789 addptr(tmp1, 8); 7790 subl(len, 8); 7791 addl(pos, 8); 7792 7793 bind(L_less_than_8_left); 7794 cmpl(len, 4); 7795 jcc(Assembler::less, L_less_than_4_left); 7796 7797 //load 4 Bytes 7798 movl(rax, Address(buf, pos, Address::times_1, 0)); 7799 movl(Address(tmp1, 0 * 16), rax); 7800 addptr(tmp1, 4); 7801 subl(len, 4); 7802 addl(pos, 4); 7803 7804 bind(L_less_than_4_left); 7805 cmpl(len, 2); 7806 jcc(Assembler::less, L_less_than_2_left); 7807 7808 // load 2 Bytes 7809 movw(rax, Address(buf, pos, Address::times_1, 0)); 7810 movl(Address(tmp1, 0 * 16), rax); 7811 addptr(tmp1, 2); 7812 subl(len, 2); 7813 addl(pos, 2); 7814 7815 bind(L_less_than_2_left); 7816 cmpl(len, 1); 7817 jcc(Assembler::less, L_zero_left); 7818 7819 // load 1 Byte 7820 movb(rax, Address(buf, pos, Address::times_1, 0)); 7821 movb(Address(tmp1, 0 * 16), rax); 7822 7823 bind(L_zero_left); 7824 movdqu(xmm7, Address(rsp, 0)); 7825 pxor(xmm7, xmm0); //xor the initial crc value 7826 7827 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 7828 movdqu(xmm0, Address(rax, tmp2)); 7829 pshufb(xmm7, xmm0); 7830 jmp(L_128_done); 7831 7832 bind(L_exact_16_left); 7833 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 7834 pxor(xmm7, xmm0); //xor the initial crc value 7835 jmp(L_128_done); 7836 7837 bind(L_only_less_than_4); 7838 cmpl(len, 3); 7839 jcc(Assembler::less, L_only_less_than_3); 7840 7841 // load 3 Bytes 7842 movb(rax, Address(buf, pos, Address::times_1, 0)); 7843 movb(Address(tmp1, 0), rax); 7844 7845 movb(rax, Address(buf, pos, Address::times_1, 1)); 7846 movb(Address(tmp1, 1), rax); 7847 7848 movb(rax, Address(buf, pos, Address::times_1, 2)); 7849 movb(Address(tmp1, 2), rax); 7850 7851 movdqu(xmm7, Address(rsp, 0)); 7852 pxor(xmm7, xmm0); //xor the initial crc value 7853 7854 pslldq(xmm7, 0x5); 7855 jmp(L_barrett); 7856 bind(L_only_less_than_3); 7857 cmpl(len, 2); 7858 jcc(Assembler::less, L_only_less_than_2); 7859 7860 // load 2 Bytes 7861 movb(rax, Address(buf, pos, Address::times_1, 0)); 7862 movb(Address(tmp1, 0), rax); 7863 7864 movb(rax, Address(buf, pos, Address::times_1, 1)); 7865 movb(Address(tmp1, 1), rax); 7866 7867 movdqu(xmm7, Address(rsp, 0)); 7868 pxor(xmm7, xmm0); //xor the initial crc value 7869 7870 pslldq(xmm7, 0x6); 7871 jmp(L_barrett); 7872 7873 bind(L_only_less_than_2); 7874 //load 1 Byte 7875 movb(rax, Address(buf, pos, Address::times_1, 0)); 7876 movb(Address(tmp1, 0), rax); 7877 7878 movdqu(xmm7, Address(rsp, 
0)); 7879 pxor(xmm7, xmm0); //xor the initial crc value 7880 7881 pslldq(xmm7, 0x7); 7882 } 7883 7884 /** 7885 * Compute CRC32 using AVX512 instructions 7886 * param crc register containing existing CRC (32-bit) 7887 * param buf register pointing to input byte buffer (byte*) 7888 * param len register containing number of bytes 7889 * param table address of crc or crc32c table 7890 * param tmp1 scratch register 7891 * param tmp2 scratch register 7892 * return rax result register 7893 * 7894 * This routine is identical for crc32c with the exception of the precomputed constant 7895 * table which will be passed as the table argument. The calculation steps are 7896 * the same for both variants. 7897 */ 7898 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 7899 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 7900 7901 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 7902 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 7903 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 7904 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 7905 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 7906 7907 const Register pos = r12; 7908 push(r12); 7909 subptr(rsp, 16 * 2 + 8); 7910 7911 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 7912 // context for the registers used, where all instructions below are using 128-bit mode 7913 // On EVEX without VL and BW, these instructions will all be AVX. 7914 movl(pos, 0); 7915 7916 // check if smaller than 256B 7917 cmpl(len, 256); 7918 jcc(Assembler::less, L_less_than_256); 7919 7920 // load the initial crc value 7921 movdl(xmm10, crc); 7922 7923 // receive the initial 64B data, xor the initial crc value 7924 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 7925 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 7926 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 7927 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 7928 7929 subl(len, 256); 7930 cmpl(len, 256); 7931 jcc(Assembler::less, L_fold_128_B_loop); 7932 7933 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 7934 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 7935 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 7936 subl(len, 256); 7937 7938 bind(L_fold_256_B_loop); 7939 addl(pos, 256); 7940 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 7941 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 7942 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 7943 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 7944 7945 subl(len, 256); 7946 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 7947 7948 // Fold 256 into 128 7949 addl(pos, 256); 7950 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 7951 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 7952 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 7953 7954 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 7955 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 7956 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 7957 7958 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 7959 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 7960 7961 addl(len, 128); 7962 jmp(L_fold_128_B_register); 7963 7964 // at this section of the code, there is 128 * x + y(0 <= y<128) bytes of buffer.The fold_128_B_loop 7965 // loop will fold 128B at a time until we have 128 + y Bytes of buffer 7966 7967 // fold 128B at a time.This section of the code folds 8 xmm registers in parallel 7968 bind(L_fold_128_B_loop); 7969 addl(pos, 128); 7970 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 7971 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 7972 7973 subl(len, 128); 7974 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 7975 7976 addl(pos, 128); 7977 7978 // at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128 7979 // the 128B of folded data is in 8 of the xmm registers : xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 7980 bind(L_fold_128_B_register); 7981 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 7982 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 7983 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 7984 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 7985 // save last that has no multiplicand 7986 vextracti64x2(xmm7, xmm4, 3); 7987 7988 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 7989 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 7990 // Needed later in reduction loop 7991 movdqu(xmm10, Address(table, 1 * 16)); 7992 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 7993 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 7994 7995 // Swap 1,0,3,2 - 01 00 11 10 7996 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 7997 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 7998 vextracti128(xmm5, xmm8, 1); 7999 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 8000 8001 // instead of 128, we add 128 - 16 to the loop counter to save 1 instruction from the loop 8002 // instead of a cmp instruction, we use the negative flag with the jl instruction 8003 addl(len, 128 - 16); 8004 jcc(Assembler::less, L_final_reduction_for_128); 8005 8006 bind(L_16B_reduction_loop); 8007 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8008 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8009 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8010 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 8011 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8012 addl(pos, 16); 8013 subl(len, 16); 8014 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 8015 8016 bind(L_final_reduction_for_128); 8017 addl(len, 16); 8018 jcc(Assembler::equal, L_128_done); 8019 8020 bind(L_get_last_two_xmms); 8021 movdqu(xmm2, xmm7); 8022 addl(pos, len); 8023 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 8024 subl(pos, len); 8025 8026 // get rid of the extra data that was loaded before 8027 // load the shift constant 8028 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 8029 movdqu(xmm0, Address(rax, len)); 8030 addl(rax, len); 8031 8032 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8033 //Change mask to 512 8034 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 8035 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 8036 8037 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 8038 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 8039 vpclmulqdq(xmm7, 
xmm7, xmm10, 0x10); 8040 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 8041 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 8042 8043 bind(L_128_done); 8044 // compute crc of a 128-bit value 8045 movdqu(xmm10, Address(table, 3 * 16)); 8046 movdqu(xmm0, xmm7); 8047 8048 // 64b fold 8049 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 8050 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 8051 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8052 8053 // 32b fold 8054 movdqu(xmm0, xmm7); 8055 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 8056 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 8057 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 8058 jmp(L_barrett); 8059 8060 bind(L_less_than_256); 8061 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 8062 8063 //barrett reduction 8064 bind(L_barrett); 8065 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 8066 movdqu(xmm1, xmm7); 8067 movdqu(xmm2, xmm7); 8068 movdqu(xmm10, Address(table, 4 * 16)); 8069 8070 pclmulqdq(xmm7, xmm10, 0x0); 8071 pxor(xmm7, xmm2); 8072 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 8073 movdqu(xmm2, xmm7); 8074 pclmulqdq(xmm7, xmm10, 0x10); 8075 pxor(xmm7, xmm2); 8076 pxor(xmm7, xmm1); 8077 pextrd(crc, xmm7, 2); 8078 8079 bind(L_cleanup); 8080 addptr(rsp, 16 * 2 + 8); 8081 pop(r12); 8082 } 8083 8084 // S. Gueron / Information Processing Letters 112 (2012) 184 8085 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 8086 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 8087 // Output: the 64-bit carry-less product of B * CONST 8088 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 8089 Register tmp1, Register tmp2, Register tmp3) { 8090 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8091 if (n > 0) { 8092 addq(tmp3, n * 256 * 8); 8093 } 8094 // Q1 = TABLEExt[n][B & 0xFF]; 8095 movl(tmp1, in); 8096 andl(tmp1, 0x000000FF); 8097 shll(tmp1, 3); 8098 addq(tmp1, tmp3); 8099 movq(tmp1, Address(tmp1, 0)); 8100 8101 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8102 movl(tmp2, in); 8103 shrl(tmp2, 8); 8104 andl(tmp2, 0x000000FF); 8105 shll(tmp2, 3); 8106 addq(tmp2, tmp3); 8107 movq(tmp2, Address(tmp2, 0)); 8108 8109 shlq(tmp2, 8); 8110 xorq(tmp1, tmp2); 8111 8112 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8113 movl(tmp2, in); 8114 shrl(tmp2, 16); 8115 andl(tmp2, 0x000000FF); 8116 shll(tmp2, 3); 8117 addq(tmp2, tmp3); 8118 movq(tmp2, Address(tmp2, 0)); 8119 8120 shlq(tmp2, 16); 8121 xorq(tmp1, tmp2); 8122 8123 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8124 shrl(in, 24); 8125 andl(in, 0x000000FF); 8126 shll(in, 3); 8127 addq(in, tmp3); 8128 movq(in, Address(in, 0)); 8129 8130 shlq(in, 24); 8131 xorq(in, tmp1); 8132 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8133 } 8134 8135 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8136 Register in_out, 8137 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8138 XMMRegister w_xtmp2, 8139 Register tmp1, 8140 Register n_tmp2, Register n_tmp3) { 8141 if (is_pclmulqdq_supported) { 8142 movdl(w_xtmp1, in_out); // modified blindly 8143 8144 movl(tmp1, const_or_pre_comp_const_index); 8145 movdl(w_xtmp2, tmp1); 8146 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8147 8148 movdq(in_out, w_xtmp1); 8149 } else { 8150 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 8151 } 8152 } 8153 8154 // 
Recombination Alternative 2: No bit-reflections 8155 // T1 = (CRC_A * U1) << 1 8156 // T2 = (CRC_B * U2) << 1 8157 // C1 = T1 >> 32 8158 // C2 = T2 >> 32 8159 // T1 = T1 & 0xFFFFFFFF 8160 // T2 = T2 & 0xFFFFFFFF 8161 // T1 = CRC32(0, T1) 8162 // T2 = CRC32(0, T2) 8163 // C1 = C1 ^ T1 8164 // C2 = C2 ^ T2 8165 // CRC = C1 ^ C2 ^ CRC_C 8166 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8167 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8168 Register tmp1, Register tmp2, 8169 Register n_tmp3) { 8170 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8171 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8172 shlq(in_out, 1); 8173 movl(tmp1, in_out); 8174 shrq(in_out, 32); 8175 xorl(tmp2, tmp2); 8176 crc32(tmp2, tmp1, 4); 8177 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 8178 shlq(in1, 1); 8179 movl(tmp1, in1); 8180 shrq(in1, 32); 8181 xorl(tmp2, tmp2); 8182 crc32(tmp2, tmp1, 4); 8183 xorl(in1, tmp2); 8184 xorl(in_out, in1); 8185 xorl(in_out, in2); 8186 } 8187 8188 // Set N to predefined value 8189 // Subtract from a length of a buffer 8190 // execute in a loop: 8191 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 8192 // for i = 1 to N do 8193 // CRC_A = CRC32(CRC_A, A[i]) 8194 // CRC_B = CRC32(CRC_B, B[i]) 8195 // CRC_C = CRC32(CRC_C, C[i]) 8196 // end for 8197 // Recombine 8198 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8199 Register in_out1, Register in_out2, Register in_out3, 8200 Register tmp1, Register tmp2, Register tmp3, 8201 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8202 Register tmp4, Register tmp5, 8203 Register n_tmp6) { 8204 Label L_processPartitions; 8205 Label L_processPartition; 8206 Label L_exit; 8207 8208 bind(L_processPartitions); 8209 cmpl(in_out1, 3 * size); 8210 jcc(Assembler::less, L_exit); 8211 xorl(tmp1, tmp1); 8212 xorl(tmp2, tmp2); 8213 movq(tmp3, in_out2); 8214 addq(tmp3, size); 8215 8216 bind(L_processPartition); 8217 crc32(in_out3, Address(in_out2, 0), 8); 8218 crc32(tmp1, Address(in_out2, size), 8); 8219 crc32(tmp2, Address(in_out2, size * 2), 8); 8220 addq(in_out2, 8); 8221 cmpq(in_out2, tmp3); 8222 jcc(Assembler::less, L_processPartition); 8223 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8224 w_xtmp1, w_xtmp2, w_xtmp3, 8225 tmp4, tmp5, 8226 n_tmp6); 8227 addq(in_out2, 2 * size); 8228 subl(in_out1, 3 * size); 8229 jmp(L_processPartitions); 8230 8231 bind(L_exit); 8232 } 8233 #else 8234 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 8235 Register tmp1, Register tmp2, Register tmp3, 8236 XMMRegister xtmp1, XMMRegister xtmp2) { 8237 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 8238 if (n > 0) { 8239 addl(tmp3, n * 256 * 8); 8240 } 8241 // Q1 = TABLEExt[n][B & 0xFF]; 8242 movl(tmp1, in_out); 8243 andl(tmp1, 0x000000FF); 8244 shll(tmp1, 3); 8245 addl(tmp1, tmp3); 8246 movq(xtmp1, Address(tmp1, 0)); 8247 8248 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 8249 movl(tmp2, in_out); 8250 shrl(tmp2, 8); 8251 andl(tmp2, 0x000000FF); 8252 shll(tmp2, 3); 8253 addl(tmp2, tmp3); 8254 movq(xtmp2, 
Address(tmp2, 0)); 8255 8256 psllq(xtmp2, 8); 8257 pxor(xtmp1, xtmp2); 8258 8259 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 8260 movl(tmp2, in_out); 8261 shrl(tmp2, 16); 8262 andl(tmp2, 0x000000FF); 8263 shll(tmp2, 3); 8264 addl(tmp2, tmp3); 8265 movq(xtmp2, Address(tmp2, 0)); 8266 8267 psllq(xtmp2, 16); 8268 pxor(xtmp1, xtmp2); 8269 8270 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 8271 shrl(in_out, 24); 8272 andl(in_out, 0x000000FF); 8273 shll(in_out, 3); 8274 addl(in_out, tmp3); 8275 movq(xtmp2, Address(in_out, 0)); 8276 8277 psllq(xtmp2, 24); 8278 pxor(xtmp1, xtmp2); // Result in CXMM 8279 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 8280 } 8281 8282 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 8283 Register in_out, 8284 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 8285 XMMRegister w_xtmp2, 8286 Register tmp1, 8287 Register n_tmp2, Register n_tmp3) { 8288 if (is_pclmulqdq_supported) { 8289 movdl(w_xtmp1, in_out); 8290 8291 movl(tmp1, const_or_pre_comp_const_index); 8292 movdl(w_xtmp2, tmp1); 8293 pclmulqdq(w_xtmp1, w_xtmp2, 0); 8294 // Keep result in XMM since GPR is 32 bit in length 8295 } else { 8296 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 8297 } 8298 } 8299 8300 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 8301 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8302 Register tmp1, Register tmp2, 8303 Register n_tmp3) { 8304 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8305 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 8306 8307 psllq(w_xtmp1, 1); 8308 movdl(tmp1, w_xtmp1); 8309 psrlq(w_xtmp1, 32); 8310 movdl(in_out, w_xtmp1); 8311 8312 xorl(tmp2, tmp2); 8313 crc32(tmp2, tmp1, 4); 8314 xorl(in_out, tmp2); 8315 8316 psllq(w_xtmp2, 1); 8317 movdl(tmp1, w_xtmp2); 8318 psrlq(w_xtmp2, 32); 8319 movdl(in1, w_xtmp2); 8320 8321 xorl(tmp2, tmp2); 8322 crc32(tmp2, tmp1, 4); 8323 xorl(in1, tmp2); 8324 xorl(in_out, in1); 8325 xorl(in_out, in2); 8326 } 8327 8328 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 8329 Register in_out1, Register in_out2, Register in_out3, 8330 Register tmp1, Register tmp2, Register tmp3, 8331 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8332 Register tmp4, Register tmp5, 8333 Register n_tmp6) { 8334 Label L_processPartitions; 8335 Label L_processPartition; 8336 Label L_exit; 8337 8338 bind(L_processPartitions); 8339 cmpl(in_out1, 3 * size); 8340 jcc(Assembler::less, L_exit); 8341 xorl(tmp1, tmp1); 8342 xorl(tmp2, tmp2); 8343 movl(tmp3, in_out2); 8344 addl(tmp3, size); 8345 8346 bind(L_processPartition); 8347 crc32(in_out3, Address(in_out2, 0), 4); 8348 crc32(tmp1, Address(in_out2, size), 4); 8349 crc32(tmp2, Address(in_out2, size*2), 4); 8350 crc32(in_out3, Address(in_out2, 0+4), 4); 8351 crc32(tmp1, Address(in_out2, size+4), 4); 8352 crc32(tmp2, Address(in_out2, size*2+4), 4); 8353 addl(in_out2, 8); 8354 cmpl(in_out2, tmp3); 8355 jcc(Assembler::less, L_processPartition); 8356 8357 push(tmp3); 8358 push(in_out1); 8359 push(in_out2); 8360 tmp4 = tmp3; 8361 tmp5 = in_out1; 8362 n_tmp6 = in_out2; 8363 8364 
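// Added note (not in the original): on this 32-bit path there are no spare GPRs,
// so tmp3/in_out1/in_out2 were pushed above and are reused here as the scratch
// registers for crc32c_rec_alt2; the pops below restore their original values.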
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 8365 w_xtmp1, w_xtmp2, w_xtmp3, 8366 tmp4, tmp5, 8367 n_tmp6); 8368 8369 pop(in_out2); 8370 pop(in_out1); 8371 pop(tmp3); 8372 8373 addl(in_out2, 2 * size); 8374 subl(in_out1, 3 * size); 8375 jmp(L_processPartitions); 8376 8377 bind(L_exit); 8378 } 8379 #endif //LP64 8380 8381 #ifdef _LP64 8382 // Algorithm 2: Pipelined usage of the CRC32 instruction. 8383 // Input: A buffer I of L bytes. 8384 // Output: the CRC32C value of the buffer. 8385 // Notations: 8386 // Write L = 24N + r, with N = floor (L/24). 8387 // r = L mod 24 (0 <= r < 24). 8388 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 8389 // N quadwords, and R consists of r bytes. 8390 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 8391 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 8392 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 8393 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 8394 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8395 Register tmp1, Register tmp2, Register tmp3, 8396 Register tmp4, Register tmp5, Register tmp6, 8397 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8398 bool is_pclmulqdq_supported) { 8399 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8400 Label L_wordByWord; 8401 Label L_byteByByteProlog; 8402 Label L_byteByByte; 8403 Label L_exit; 8404 8405 if (is_pclmulqdq_supported ) { 8406 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8407 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 8408 8409 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8410 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8411 8412 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8413 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8414 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 8415 } else { 8416 const_or_pre_comp_const_index[0] = 1; 8417 const_or_pre_comp_const_index[1] = 0; 8418 8419 const_or_pre_comp_const_index[2] = 3; 8420 const_or_pre_comp_const_index[3] = 2; 8421 8422 const_or_pre_comp_const_index[4] = 5; 8423 const_or_pre_comp_const_index[5] = 4; 8424 } 8425 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8426 in2, in1, in_out, 8427 tmp1, tmp2, tmp3, 8428 w_xtmp1, w_xtmp2, w_xtmp3, 8429 tmp4, tmp5, 8430 tmp6); 8431 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8432 in2, in1, in_out, 8433 tmp1, tmp2, tmp3, 8434 w_xtmp1, w_xtmp2, w_xtmp3, 8435 tmp4, tmp5, 8436 tmp6); 8437 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8438 in2, in1, in_out, 8439 tmp1, tmp2, tmp3, 8440 w_xtmp1, w_xtmp2, w_xtmp3, 8441 tmp4, tmp5, 8442 tmp6); 8443 movl(tmp1, in2); 8444 andl(tmp1, 0x00000007); 8445 negl(tmp1); 8446 addl(tmp1, in2); 8447 addq(tmp1, in1); 8448 8449 cmpq(in1, tmp1); 8450 jccb(Assembler::greaterEqual, L_byteByByteProlog); 8451 align(16); 8452 BIND(L_wordByWord); 8453 crc32(in_out, Address(in1, 0), 8); 8454 addq(in1, 8); 8455 cmpq(in1, tmp1); 8456 
jcc(Assembler::less, L_wordByWord); 8457 8458 BIND(L_byteByByteProlog); 8459 andl(in2, 0x00000007); 8460 movl(tmp2, 1); 8461 8462 cmpl(tmp2, in2); 8463 jccb(Assembler::greater, L_exit); 8464 BIND(L_byteByByte); 8465 crc32(in_out, Address(in1, 0), 1); 8466 incq(in1); 8467 incl(tmp2); 8468 cmpl(tmp2, in2); 8469 jcc(Assembler::lessEqual, L_byteByByte); 8470 8471 BIND(L_exit); 8472 } 8473 #else 8474 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 8475 Register tmp1, Register tmp2, Register tmp3, 8476 Register tmp4, Register tmp5, Register tmp6, 8477 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 8478 bool is_pclmulqdq_supported) { 8479 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 8480 Label L_wordByWord; 8481 Label L_byteByByteProlog; 8482 Label L_byteByByte; 8483 Label L_exit; 8484 8485 if (is_pclmulqdq_supported) { 8486 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 8487 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 8488 8489 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 8490 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 8491 8492 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 8493 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 8494 } else { 8495 const_or_pre_comp_const_index[0] = 1; 8496 const_or_pre_comp_const_index[1] = 0; 8497 8498 const_or_pre_comp_const_index[2] = 3; 8499 const_or_pre_comp_const_index[3] = 2; 8500 8501 const_or_pre_comp_const_index[4] = 5; 8502 const_or_pre_comp_const_index[5] = 4; 8503 } 8504 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 8505 in2, in1, in_out, 8506 tmp1, tmp2, tmp3, 8507 w_xtmp1, w_xtmp2, w_xtmp3, 8508 tmp4, tmp5, 8509 tmp6); 8510 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 8511 in2, in1, in_out, 8512 tmp1, tmp2, tmp3, 8513 w_xtmp1, w_xtmp2, w_xtmp3, 8514 tmp4, tmp5, 8515 tmp6); 8516 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 8517 in2, in1, in_out, 8518 tmp1, tmp2, tmp3, 8519 w_xtmp1, w_xtmp2, w_xtmp3, 8520 tmp4, tmp5, 8521 tmp6); 8522 movl(tmp1, in2); 8523 andl(tmp1, 0x00000007); 8524 negl(tmp1); 8525 addl(tmp1, in2); 8526 addl(tmp1, in1); 8527 8528 BIND(L_wordByWord); 8529 cmpl(in1, tmp1); 8530 jcc(Assembler::greaterEqual, L_byteByByteProlog); 8531 crc32(in_out, Address(in1,0), 4); 8532 addl(in1, 4); 8533 jmp(L_wordByWord); 8534 8535 BIND(L_byteByByteProlog); 8536 andl(in2, 0x00000007); 8537 movl(tmp2, 1); 8538 8539 BIND(L_byteByByte); 8540 cmpl(tmp2, in2); 8541 jccb(Assembler::greater, L_exit); 8542 movb(tmp1, Address(in1, 0)); 8543 crc32(in_out, tmp1, 1); 8544 incl(in1); 8545 incl(tmp2); 8546 jmp(L_byteByByte); 8547 8548 BIND(L_exit); 8549 } 8550 #endif // LP64 8551 #undef BIND 8552 #undef BLOCK_COMMENT 8553 8554 // Compress char[] array to byte[]. 
8555 // ..\jdk\src\java.base\share\classes\java\lang\StringUTF16.java 8556 // @IntrinsicCandidate 8557 // private static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 8558 // for (int i = 0; i < len; i++) { 8559 // int c = src[srcOff++]; 8560 // if (c >>> 8 != 0) { 8561 // return 0; 8562 // } 8563 // dst[dstOff++] = (byte)c; 8564 // } 8565 // return len; 8566 // } 8567 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 8568 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 8569 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 8570 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 8571 Label copy_chars_loop, return_length, return_zero, done; 8572 8573 // rsi: src 8574 // rdi: dst 8575 // rdx: len 8576 // rcx: tmp5 8577 // rax: result 8578 8579 // rsi holds start addr of source char[] to be compressed 8580 // rdi holds start addr of destination byte[] 8581 // rdx holds length 8582 8583 assert(len != result, ""); 8584 8585 // save length for return 8586 push(len); 8587 8588 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 8589 VM_Version::supports_avx512vlbw() && 8590 VM_Version::supports_bmi2()) { 8591 8592 Label copy_32_loop, copy_loop_tail, below_threshold; 8593 8594 // alignment 8595 Label post_alignment; 8596 8597 // if the length of the string is less than 32, handle it the old-fashioned way 8598 testl(len, -32); 8599 jcc(Assembler::zero, below_threshold); 8600 8601 // First check whether a character is compressible ( <= 0xFF). 8602 // Create mask to test for Unicode chars inside zmm vector 8603 movl(result, 0x00FF); 8604 evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit); 8605 8606 testl(len, -64); 8607 jcc(Assembler::zero, post_alignment); 8608 8609 movl(tmp5, dst); 8610 andl(tmp5, (32 - 1)); 8611 negl(tmp5); 8612 andl(tmp5, (32 - 1)); 8613 8614 // bail out when there is nothing to be done 8615 testl(tmp5, 0xFFFFFFFF); 8616 jcc(Assembler::zero, post_alignment); 8617 8618 // ~(~0 << len), where len is the # of remaining elements to process 8619 movl(result, 0xFFFFFFFF); 8620 shlxl(result, result, tmp5); 8621 notl(result); 8622 kmovdl(mask2, result); 8623 8624 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8625 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8626 ktestd(mask1, mask2); 8627 jcc(Assembler::carryClear, return_zero); 8628 8629 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8630 8631 addptr(src, tmp5); 8632 addptr(src, tmp5); 8633 addptr(dst, tmp5); 8634 subl(len, tmp5); 8635 8636 bind(post_alignment); 8637 // end of alignment 8638 8639 movl(tmp5, len); 8640 andl(tmp5, (32 - 1)); // tail count (in chars) 8641 andl(len, ~(32 - 1)); // vector count (in chars) 8642 jcc(Assembler::zero, copy_loop_tail); 8643 8644 lea(src, Address(src, len, Address::times_2)); 8645 lea(dst, Address(dst, len, Address::times_1)); 8646 negptr(len); 8647 8648 bind(copy_32_loop); 8649 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 8650 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 8651 kortestdl(mask1, mask1); 8652 jcc(Assembler::carryClear, return_zero); 8653 8654 // All elements in the current processed chunk are valid candidates for 8655 // compression. Write the truncated byte elements to memory.
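// (vpmovwb narrows each 16-bit char element to its low byte, so a 512-bit vector of 32 chars is stored as 32 bytes.)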
8656 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 8657 addptr(len, 32); 8658 jcc(Assembler::notZero, copy_32_loop); 8659 8660 bind(copy_loop_tail); 8661 // bail out when there is nothing to be done 8662 testl(tmp5, 0xFFFFFFFF); 8663 jcc(Assembler::zero, return_length); 8664 8665 movl(len, tmp5); 8666 8667 // ~(~0 << len), where len is the # of remaining elements to process 8668 movl(result, 0xFFFFFFFF); 8669 shlxl(result, result, len); 8670 notl(result); 8671 8672 kmovdl(mask2, result); 8673 8674 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 8675 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 8676 ktestd(mask1, mask2); 8677 jcc(Assembler::carryClear, return_zero); 8678 8679 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 8680 jmp(return_length); 8681 8682 bind(below_threshold); 8683 } 8684 8685 if (UseSSE42Intrinsics) { 8686 Label copy_32_loop, copy_16, copy_tail; 8687 8688 movl(result, len); 8689 8690 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 8691 8692 // vectored compression 8693 andl(len, 0xfffffff0); // vector count (in chars) 8694 andl(result, 0x0000000f); // tail count (in chars) 8695 testl(len, len); 8696 jcc(Assembler::zero, copy_16); 8697 8698 // compress 16 chars per iter 8699 movdl(tmp1Reg, tmp5); 8700 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 8701 pxor(tmp4Reg, tmp4Reg); 8702 8703 lea(src, Address(src, len, Address::times_2)); 8704 lea(dst, Address(dst, len, Address::times_1)); 8705 negptr(len); 8706 8707 bind(copy_32_loop); 8708 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 8709 por(tmp4Reg, tmp2Reg); 8710 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 8711 por(tmp4Reg, tmp3Reg); 8712 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 8713 jcc(Assembler::notZero, return_zero); 8714 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 8715 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 8716 addptr(len, 16); 8717 jcc(Assembler::notZero, copy_32_loop); 8718 8719 // compress next vector of 8 chars (if any) 8720 bind(copy_16); 8721 movl(len, result); 8722 andl(len, 0xfffffff8); // vector count (in chars) 8723 andl(result, 0x00000007); // tail count (in chars) 8724 testl(len, len); 8725 jccb(Assembler::zero, copy_tail); 8726 8727 movdl(tmp1Reg, tmp5); 8728 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 8729 pxor(tmp3Reg, tmp3Reg); 8730 8731 movdqu(tmp2Reg, Address(src, 0)); 8732 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 8733 jccb(Assembler::notZero, return_zero); 8734 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 8735 movq(Address(dst, 0), tmp2Reg); 8736 addptr(src, 16); 8737 addptr(dst, 8); 8738 8739 bind(copy_tail); 8740 movl(len, result); 8741 } 8742 // compress 1 char per iter 8743 testl(len, len); 8744 jccb(Assembler::zero, return_length); 8745 lea(src, Address(src, len, Address::times_2)); 8746 lea(dst, Address(dst, len, Address::times_1)); 8747 negptr(len); 8748 8749 bind(copy_chars_loop); 8750 load_unsigned_short(result, Address(src, len, Address::times_2)); 8751 testl(result, 0xff00); // check if Unicode char 8752 jccb(Assembler::notZero, return_zero); 8753 movb(Address(dst, len, Address::times_1), result); // ASCII char; compress to 1 byte 8754 increment(len); 8755 jcc(Assembler::notZero, copy_chars_loop); 
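// Epilogue: on success the length pushed at entry is popped into result; on failure result is zeroed and the saved slot is discarded by bumping rsp.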
8756 8757 // if compression succeeded, return length 8758 bind(return_length); 8759 pop(result); 8760 jmpb(done); 8761 8762 // if compression failed, return 0 8763 bind(return_zero); 8764 xorl(result, result); 8765 addptr(rsp, wordSize); 8766 8767 bind(done); 8768 } 8769 8770 // Inflate byte[] array to char[]. 8771 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 8772 // @IntrinsicCandidate 8773 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 8774 // for (int i = 0; i < len; i++) { 8775 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 8776 // } 8777 // } 8778 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 8779 XMMRegister tmp1, Register tmp2, KRegister mask) { 8780 Label copy_chars_loop, done, below_threshold, avx3_threshold; 8781 // rsi: src 8782 // rdi: dst 8783 // rdx: len 8784 // rcx: tmp2 8785 8786 // rsi holds start addr of source byte[] to be inflated 8787 // rdi holds start addr of destination char[] 8788 // rdx holds length 8789 assert_different_registers(src, dst, len, tmp2); 8790 movl(tmp2, len); 8791 if ((UseAVX > 2) && // AVX512 8792 VM_Version::supports_avx512vlbw() && 8793 VM_Version::supports_bmi2()) { 8794 8795 Label copy_32_loop, copy_tail; 8796 Register tmp3_aliased = len; 8797 8798 // if length of the string is less than 16, handle it in an old fashioned way 8799 testl(len, -16); 8800 jcc(Assembler::zero, below_threshold); 8801 8802 testl(len, -1 * AVX3Threshold); 8803 jcc(Assembler::zero, avx3_threshold); 8804 8805 // In order to use only one arithmetic operation for the main loop we use 8806 // this pre-calculation 8807 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 8808 andl(len, -32); // vector count 8809 jccb(Assembler::zero, copy_tail); 8810 8811 lea(src, Address(src, len, Address::times_1)); 8812 lea(dst, Address(dst, len, Address::times_2)); 8813 negptr(len); 8814 8815 8816 // inflate 32 chars per iter 8817 bind(copy_32_loop); 8818 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 8819 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 8820 addptr(len, 32); 8821 jcc(Assembler::notZero, copy_32_loop); 8822 8823 bind(copy_tail); 8824 // bail out when there is nothing to be done 8825 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 8826 jcc(Assembler::zero, done); 8827 8828 // ~(~0 << length), where length is the # of remaining elements to process 8829 movl(tmp3_aliased, -1); 8830 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 8831 notl(tmp3_aliased); 8832 kmovdl(mask, tmp3_aliased); 8833 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 8834 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 8835 8836 jmp(done); 8837 bind(avx3_threshold); 8838 } 8839 if (UseSSE42Intrinsics) { 8840 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 8841 8842 if (UseAVX > 1) { 8843 andl(tmp2, (16 - 1)); 8844 andl(len, -16); 8845 jccb(Assembler::zero, copy_new_tail); 8846 } else { 8847 andl(tmp2, 0x00000007); // tail count (in chars) 8848 andl(len, 0xfffffff8); // vector count (in chars) 8849 jccb(Assembler::zero, copy_tail); 8850 } 8851 8852 // vectored inflation 8853 lea(src, Address(src, len, Address::times_1)); 8854 lea(dst, Address(dst, len, Address::times_2)); 8855 negptr(len); 8856 8857 if (UseAVX > 1) { 8858 bind(copy_16_loop); 8859 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 8860 vmovdqu(Address(dst, len, 
Address::times_2), tmp1); 8861 addptr(len, 16); 8862 jcc(Assembler::notZero, copy_16_loop); 8863 8864 bind(below_threshold); 8865 bind(copy_new_tail); 8866 movl(len, tmp2); 8867 andl(tmp2, 0x00000007); 8868 andl(len, 0xFFFFFFF8); 8869 jccb(Assembler::zero, copy_tail); 8870 8871 pmovzxbw(tmp1, Address(src, 0)); 8872 movdqu(Address(dst, 0), tmp1); 8873 addptr(src, 8); 8874 addptr(dst, 2 * 8); 8875 8876 jmp(copy_tail, true); 8877 } 8878 8879 // inflate 8 chars per iter 8880 bind(copy_8_loop); 8881 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 8882 movdqu(Address(dst, len, Address::times_2), tmp1); 8883 addptr(len, 8); 8884 jcc(Assembler::notZero, copy_8_loop); 8885 8886 bind(copy_tail); 8887 movl(len, tmp2); 8888 8889 cmpl(len, 4); 8890 jccb(Assembler::less, copy_bytes); 8891 8892 movdl(tmp1, Address(src, 0)); // load 4 byte chars 8893 pmovzxbw(tmp1, tmp1); 8894 movq(Address(dst, 0), tmp1); 8895 subptr(len, 4); 8896 addptr(src, 4); 8897 addptr(dst, 8); 8898 8899 bind(copy_bytes); 8900 } else { 8901 bind(below_threshold); 8902 } 8903 8904 testl(len, len); 8905 jccb(Assembler::zero, done); 8906 lea(src, Address(src, len, Address::times_1)); 8907 lea(dst, Address(dst, len, Address::times_2)); 8908 negptr(len); 8909 8910 // inflate 1 char per iter 8911 bind(copy_chars_loop); 8912 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 8913 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 8914 increment(len); 8915 jcc(Assembler::notZero, copy_chars_loop); 8916 8917 bind(done); 8918 } 8919 8920 8921 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 8922 switch(type) { 8923 case T_BYTE: 8924 case T_BOOLEAN: 8925 evmovdqub(dst, kmask, src, merge, vector_len); 8926 break; 8927 case T_CHAR: 8928 case T_SHORT: 8929 evmovdquw(dst, kmask, src, merge, vector_len); 8930 break; 8931 case T_INT: 8932 case T_FLOAT: 8933 evmovdqul(dst, kmask, src, merge, vector_len); 8934 break; 8935 case T_LONG: 8936 case T_DOUBLE: 8937 evmovdquq(dst, kmask, src, merge, vector_len); 8938 break; 8939 default: 8940 fatal("Unexpected type argument %s", type2name(type)); 8941 break; 8942 } 8943 } 8944 8945 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len) { 8946 switch(type) { 8947 case T_BYTE: 8948 case T_BOOLEAN: 8949 evmovdqub(dst, kmask, src, merge, vector_len); 8950 break; 8951 case T_CHAR: 8952 case T_SHORT: 8953 evmovdquw(dst, kmask, src, merge, vector_len); 8954 break; 8955 case T_INT: 8956 case T_FLOAT: 8957 evmovdqul(dst, kmask, src, merge, vector_len); 8958 break; 8959 case T_LONG: 8960 case T_DOUBLE: 8961 evmovdquq(dst, kmask, src, merge, vector_len); 8962 break; 8963 default: 8964 fatal("Unexpected type argument %s", type2name(type)); 8965 break; 8966 } 8967 } 8968 8969 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 8970 switch(masklen) { 8971 case 2: 8972 knotbl(dst, src); 8973 movl(rtmp, 3); 8974 kmovbl(ktmp, rtmp); 8975 kandbl(dst, ktmp, dst); 8976 break; 8977 case 4: 8978 knotbl(dst, src); 8979 movl(rtmp, 15); 8980 kmovbl(ktmp, rtmp); 8981 kandbl(dst, ktmp, dst); 8982 break; 8983 case 8: 8984 knotbl(dst, src); 8985 break; 8986 case 16: 8987 knotwl(dst, src); 8988 break; 8989 case 32: 8990 knotdl(dst, src); 8991 break; 8992 case 64: 8993 knotql(dst, src); 8994 break; 8995 default: 8996 fatal("Unexpected vector length %d", masklen); 8997 
break; 8998 } 8999 } 9000 9001 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9002 switch(type) { 9003 case T_BOOLEAN: 9004 case T_BYTE: 9005 kandbl(dst, src1, src2); 9006 break; 9007 case T_CHAR: 9008 case T_SHORT: 9009 kandwl(dst, src1, src2); 9010 break; 9011 case T_INT: 9012 case T_FLOAT: 9013 kanddl(dst, src1, src2); 9014 break; 9015 case T_LONG: 9016 case T_DOUBLE: 9017 kandql(dst, src1, src2); 9018 break; 9019 default: 9020 fatal("Unexpected type argument %s", type2name(type)); 9021 break; 9022 } 9023 } 9024 9025 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9026 switch(type) { 9027 case T_BOOLEAN: 9028 case T_BYTE: 9029 korbl(dst, src1, src2); 9030 break; 9031 case T_CHAR: 9032 case T_SHORT: 9033 korwl(dst, src1, src2); 9034 break; 9035 case T_INT: 9036 case T_FLOAT: 9037 kordl(dst, src1, src2); 9038 break; 9039 case T_LONG: 9040 case T_DOUBLE: 9041 korql(dst, src1, src2); 9042 break; 9043 default: 9044 fatal("Unexpected type argument %s", type2name(type)); 9045 break; 9046 } 9047 } 9048 9049 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 9050 switch(type) { 9051 case T_BOOLEAN: 9052 case T_BYTE: 9053 kxorbl(dst, src1, src2); 9054 break; 9055 case T_CHAR: 9056 case T_SHORT: 9057 kxorwl(dst, src1, src2); 9058 break; 9059 case T_INT: 9060 case T_FLOAT: 9061 kxordl(dst, src1, src2); 9062 break; 9063 case T_LONG: 9064 case T_DOUBLE: 9065 kxorql(dst, src1, src2); 9066 break; 9067 default: 9068 fatal("Unexpected type argument %s", type2name(type)); 9069 break; 9070 } 9071 } 9072 9073 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9074 switch(type) { 9075 case T_BOOLEAN: 9076 case T_BYTE: 9077 evpermb(dst, mask, nds, src, merge, vector_len); break; 9078 case T_CHAR: 9079 case T_SHORT: 9080 evpermw(dst, mask, nds, src, merge, vector_len); break; 9081 case T_INT: 9082 case T_FLOAT: 9083 evpermd(dst, mask, nds, src, merge, vector_len); break; 9084 case T_LONG: 9085 case T_DOUBLE: 9086 evpermq(dst, mask, nds, src, merge, vector_len); break; 9087 default: 9088 fatal("Unexpected type argument %s", type2name(type)); break; 9089 } 9090 } 9091 9092 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9093 switch(type) { 9094 case T_BOOLEAN: 9095 case T_BYTE: 9096 evpermb(dst, mask, nds, src, merge, vector_len); break; 9097 case T_CHAR: 9098 case T_SHORT: 9099 evpermw(dst, mask, nds, src, merge, vector_len); break; 9100 case T_INT: 9101 case T_FLOAT: 9102 evpermd(dst, mask, nds, src, merge, vector_len); break; 9103 case T_LONG: 9104 case T_DOUBLE: 9105 evpermq(dst, mask, nds, src, merge, vector_len); break; 9106 default: 9107 fatal("Unexpected type argument %s", type2name(type)); break; 9108 } 9109 } 9110 9111 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9112 switch(type) { 9113 case T_BYTE: 9114 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9115 case T_SHORT: 9116 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9117 case T_INT: 9118 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9119 case T_LONG: 9120 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9121 default: 9122 fatal("Unexpected type argument %s", type2name(type)); break; 9123 } 9124 } 9125 9126 void 
MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9127 switch(type) { 9128 case T_BYTE: 9129 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9130 case T_SHORT: 9131 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9132 case T_INT: 9133 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9134 case T_LONG: 9135 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9136 default: 9137 fatal("Unexpected type argument %s", type2name(type)); break; 9138 } 9139 } 9140 9141 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9142 switch(type) { 9143 case T_BYTE: 9144 evpminsb(dst, mask, nds, src, merge, vector_len); break; 9145 case T_SHORT: 9146 evpminsw(dst, mask, nds, src, merge, vector_len); break; 9147 case T_INT: 9148 evpminsd(dst, mask, nds, src, merge, vector_len); break; 9149 case T_LONG: 9150 evpminsq(dst, mask, nds, src, merge, vector_len); break; 9151 default: 9152 fatal("Unexpected type argument %s", type2name(type)); break; 9153 } 9154 } 9155 9156 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9157 switch(type) { 9158 case T_BYTE: 9159 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 9160 case T_SHORT: 9161 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 9162 case T_INT: 9163 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 9164 case T_LONG: 9165 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 9166 default: 9167 fatal("Unexpected type argument %s", type2name(type)); break; 9168 } 9169 } 9170 9171 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9172 switch(type) { 9173 case T_INT: 9174 evpxord(dst, mask, nds, src, merge, vector_len); break; 9175 case T_LONG: 9176 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9177 default: 9178 fatal("Unexpected type argument %s", type2name(type)); break; 9179 } 9180 } 9181 9182 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9183 switch(type) { 9184 case T_INT: 9185 evpxord(dst, mask, nds, src, merge, vector_len); break; 9186 case T_LONG: 9187 evpxorq(dst, mask, nds, src, merge, vector_len); break; 9188 default: 9189 fatal("Unexpected type argument %s", type2name(type)); break; 9190 } 9191 } 9192 9193 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 9194 switch(type) { 9195 case T_INT: 9196 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9197 case T_LONG: 9198 evporq(dst, mask, nds, src, merge, vector_len); break; 9199 default: 9200 fatal("Unexpected type argument %s", type2name(type)); break; 9201 } 9202 } 9203 9204 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9205 switch(type) { 9206 case T_INT: 9207 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 9208 case T_LONG: 9209 evporq(dst, mask, nds, src, merge, vector_len); break; 9210 default: 9211 fatal("Unexpected type argument %s", type2name(type)); break; 9212 } 9213 } 9214 9215 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister 
src, bool merge, int vector_len) { 9216 switch(type) { 9217 case T_INT: 9218 evpandd(dst, mask, nds, src, merge, vector_len); break; 9219 case T_LONG: 9220 evpandq(dst, mask, nds, src, merge, vector_len); break; 9221 default: 9222 fatal("Unexpected type argument %s", type2name(type)); break; 9223 } 9224 } 9225 9226 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 9227 switch(type) { 9228 case T_INT: 9229 evpandd(dst, mask, nds, src, merge, vector_len); break; 9230 case T_LONG: 9231 evpandq(dst, mask, nds, src, merge, vector_len); break; 9232 default: 9233 fatal("Unexpected type argument %s", type2name(type)); break; 9234 } 9235 } 9236 9237 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 9238 switch(masklen) { 9239 case 8: 9240 kortestbl(src1, src2); 9241 break; 9242 case 16: 9243 kortestwl(src1, src2); 9244 break; 9245 case 32: 9246 kortestdl(src1, src2); 9247 break; 9248 case 64: 9249 kortestql(src1, src2); 9250 break; 9251 default: 9252 fatal("Unexpected mask length %d", masklen); 9253 break; 9254 } 9255 } 9256 9257 9258 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 9259 switch(masklen) { 9260 case 8: 9261 ktestbl(src1, src2); 9262 break; 9263 case 16: 9264 ktestwl(src1, src2); 9265 break; 9266 case 32: 9267 ktestdl(src1, src2); 9268 break; 9269 case 64: 9270 ktestql(src1, src2); 9271 break; 9272 default: 9273 fatal("Unexpected mask length %d", masklen); 9274 break; 9275 } 9276 } 9277 9278 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9279 switch(type) { 9280 case T_INT: 9281 evprold(dst, mask, src, shift, merge, vlen_enc); break; 9282 case T_LONG: 9283 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 9284 default: 9285 fatal("Unexpected type argument %s", type2name(type)); break; 9286 break; 9287 } 9288 } 9289 9290 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 9291 switch(type) { 9292 case T_INT: 9293 evprord(dst, mask, src, shift, merge, vlen_enc); break; 9294 case T_LONG: 9295 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 9296 default: 9297 fatal("Unexpected type argument %s", type2name(type)); break; 9298 } 9299 } 9300 9301 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9302 switch(type) { 9303 case T_INT: 9304 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 9305 case T_LONG: 9306 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 9307 default: 9308 fatal("Unexpected type argument %s", type2name(type)); break; 9309 } 9310 } 9311 9312 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 9313 switch(type) { 9314 case T_INT: 9315 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 9316 case T_LONG: 9317 evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 9318 default: 9319 fatal("Unexpected type argument %s", type2name(type)); break; 9320 } 9321 } 9322 9323 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9324 assert(rscratch != noreg || always_reachable(src), "missing"); 9325 9326 if (reachable(src)) { 9327 evpandq(dst, nds, as_Address(src), vector_len); 9328 } else { 9329 
lea(rscratch, src); 9330 evpandq(dst, nds, Address(rscratch, 0), vector_len); 9331 } 9332 } 9333 9334 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 9335 assert(rscratch != noreg || always_reachable(src), "missing"); 9336 9337 if (reachable(src)) { 9338 evporq(dst, nds, as_Address(src), vector_len); 9339 } else { 9340 lea(rscratch, src); 9341 evporq(dst, nds, Address(rscratch, 0), vector_len); 9342 } 9343 } 9344 9345 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 9346 assert(rscratch != noreg || always_reachable(src3), "missing"); 9347 9348 if (reachable(src3)) { 9349 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 9350 } else { 9351 lea(rscratch, src3); 9352 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 9353 } 9354 } 9355 9356 #if COMPILER2_OR_JVMCI 9357 9358 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 9359 Register length, Register temp, int vec_enc) { 9360 // Computing mask for predicated vector store. 9361 movptr(temp, -1); 9362 bzhiq(temp, temp, length); 9363 kmov(mask, temp); 9364 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 9365 } 9366 9367 // Set memory operation for length "less than" 64 bytes. 9368 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 9369 XMMRegister xmm, KRegister mask, Register length, 9370 Register temp, bool use64byteVector) { 9371 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9372 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9373 if (!use64byteVector) { 9374 fill32(dst, disp, xmm); 9375 subptr(length, 32 >> shift); 9376 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 9377 } else { 9378 assert(MaxVectorSize == 64, "vector length != 64"); 9379 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 9380 } 9381 } 9382 9383 9384 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 9385 XMMRegister xmm, KRegister mask, Register length, 9386 Register temp) { 9387 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9388 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 9389 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 9390 } 9391 9392 9393 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 9394 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9395 vmovdqu(dst, xmm); 9396 } 9397 9398 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 9399 fill32(Address(dst, disp), xmm); 9400 } 9401 9402 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 9403 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 9404 if (!use64byteVector) { 9405 fill32(dst, xmm); 9406 fill32(dst.plus_disp(32), xmm); 9407 } else { 9408 evmovdquq(dst, xmm, Assembler::AVX_512bit); 9409 } 9410 } 9411 9412 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 9413 fill64(Address(dst, disp), xmm, use64byteVector); 9414 } 9415 9416 #ifdef _LP64 9417 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 9418 Register count, Register rtmp, XMMRegister xtmp) { 9419 Label L_exit; 9420 Label L_fill_start; 9421 Label L_fill_64_bytes; 9422 Label L_fill_96_bytes; 9423 Label L_fill_128_bytes; 9424 Label L_fill_128_bytes_loop; 9425 Label 
L_fill_128_loop_header; 9426 Label L_fill_128_bytes_loop_header; 9427 Label L_fill_128_bytes_loop_pre_header; 9428 Label L_fill_zmm_sequence; 9429 9430 int shift = -1; 9431 int avx3threshold = VM_Version::avx3_threshold(); 9432 switch(type) { 9433 case T_BYTE: shift = 0; 9434 break; 9435 case T_SHORT: shift = 1; 9436 break; 9437 case T_INT: shift = 2; 9438 break; 9439 /* Uncomment when LONG fill stubs are supported. 9440 case T_LONG: shift = 3; 9441 break; 9442 */ 9443 default: 9444 fatal("Unhandled type: %s\n", type2name(type)); 9445 } 9446 9447 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 9448 9449 if (MaxVectorSize == 64) { 9450 cmpq(count, avx3threshold >> shift); 9451 jcc(Assembler::greater, L_fill_zmm_sequence); 9452 } 9453 9454 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 9455 9456 bind(L_fill_start); 9457 9458 cmpq(count, 32 >> shift); 9459 jccb(Assembler::greater, L_fill_64_bytes); 9460 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 9461 jmp(L_exit); 9462 9463 bind(L_fill_64_bytes); 9464 cmpq(count, 64 >> shift); 9465 jccb(Assembler::greater, L_fill_96_bytes); 9466 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 9467 jmp(L_exit); 9468 9469 bind(L_fill_96_bytes); 9470 cmpq(count, 96 >> shift); 9471 jccb(Assembler::greater, L_fill_128_bytes); 9472 fill64(to, 0, xtmp); 9473 subq(count, 64 >> shift); 9474 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 9475 jmp(L_exit); 9476 9477 bind(L_fill_128_bytes); 9478 cmpq(count, 128 >> shift); 9479 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 9480 fill64(to, 0, xtmp); 9481 fill32(to, 64, xtmp); 9482 subq(count, 96 >> shift); 9483 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 9484 jmp(L_exit); 9485 9486 bind(L_fill_128_bytes_loop_pre_header); 9487 { 9488 mov(rtmp, to); 9489 andq(rtmp, 31); 9490 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 9491 negq(rtmp); 9492 addq(rtmp, 32); 9493 mov64(r8, -1L); 9494 bzhiq(r8, r8, rtmp); 9495 kmovql(k2, r8); 9496 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 9497 addq(to, rtmp); 9498 shrq(rtmp, shift); 9499 subq(count, rtmp); 9500 } 9501 9502 cmpq(count, 128 >> shift); 9503 jcc(Assembler::less, L_fill_start); 9504 9505 bind(L_fill_128_bytes_loop_header); 9506 subq(count, 128 >> shift); 9507 9508 align32(); 9509 bind(L_fill_128_bytes_loop); 9510 fill64(to, 0, xtmp); 9511 fill64(to, 64, xtmp); 9512 addq(to, 128); 9513 subq(count, 128 >> shift); 9514 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 9515 9516 addq(count, 128 >> shift); 9517 jcc(Assembler::zero, L_exit); 9518 jmp(L_fill_start); 9519 } 9520 9521 if (MaxVectorSize == 64) { 9522 // Sequence using 64 byte ZMM register. 
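// Mirrors the 32-byte YMM dispatch above: counts of up to 192 bytes are handled with one to three (masked) 64-byte stores; larger counts first align the destination to 64 bytes with a masked byte store and then fill 192 bytes per loop iteration.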
9523 Label L_fill_128_bytes_zmm; 9524 Label L_fill_192_bytes_zmm; 9525 Label L_fill_192_bytes_loop_zmm; 9526 Label L_fill_192_bytes_loop_header_zmm; 9527 Label L_fill_192_bytes_loop_pre_header_zmm; 9528 Label L_fill_start_zmm_sequence; 9529 9530 bind(L_fill_zmm_sequence); 9531 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 9532 9533 bind(L_fill_start_zmm_sequence); 9534 cmpq(count, 64 >> shift); 9535 jccb(Assembler::greater, L_fill_128_bytes_zmm); 9536 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 9537 jmp(L_exit); 9538 9539 bind(L_fill_128_bytes_zmm); 9540 cmpq(count, 128 >> shift); 9541 jccb(Assembler::greater, L_fill_192_bytes_zmm); 9542 fill64(to, 0, xtmp, true); 9543 subq(count, 64 >> shift); 9544 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 9545 jmp(L_exit); 9546 9547 bind(L_fill_192_bytes_zmm); 9548 cmpq(count, 192 >> shift); 9549 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 9550 fill64(to, 0, xtmp, true); 9551 fill64(to, 64, xtmp, true); 9552 subq(count, 128 >> shift); 9553 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 9554 jmp(L_exit); 9555 9556 bind(L_fill_192_bytes_loop_pre_header_zmm); 9557 { 9558 movq(rtmp, to); 9559 andq(rtmp, 63); 9560 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 9561 negq(rtmp); 9562 addq(rtmp, 64); 9563 mov64(r8, -1L); 9564 bzhiq(r8, r8, rtmp); 9565 kmovql(k2, r8); 9566 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 9567 addq(to, rtmp); 9568 shrq(rtmp, shift); 9569 subq(count, rtmp); 9570 } 9571 9572 cmpq(count, 192 >> shift); 9573 jcc(Assembler::less, L_fill_start_zmm_sequence); 9574 9575 bind(L_fill_192_bytes_loop_header_zmm); 9576 subq(count, 192 >> shift); 9577 9578 align32(); 9579 bind(L_fill_192_bytes_loop_zmm); 9580 fill64(to, 0, xtmp, true); 9581 fill64(to, 64, xtmp, true); 9582 fill64(to, 128, xtmp, true); 9583 addq(to, 192); 9584 subq(count, 192 >> shift); 9585 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 9586 9587 addq(count, 192 >> shift); 9588 jcc(Assembler::zero, L_exit); 9589 jmp(L_fill_start_zmm_sequence); 9590 } 9591 bind(L_exit); 9592 } 9593 #endif 9594 #endif //COMPILER2_OR_JVMCI 9595 9596 9597 #ifdef _LP64 9598 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 9599 Label done; 9600 cvttss2sil(dst, src); 9601 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9602 cmpl(dst, 0x80000000); // float_sign_flip 9603 jccb(Assembler::notEqual, done); 9604 subptr(rsp, 8); 9605 movflt(Address(rsp, 0), src); 9606 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 9607 pop(dst); 9608 bind(done); 9609 } 9610 9611 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 9612 Label done; 9613 cvttsd2sil(dst, src); 9614 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 9615 cmpl(dst, 0x80000000); // float_sign_flip 9616 jccb(Assembler::notEqual, done); 9617 subptr(rsp, 8); 9618 movdbl(Address(rsp, 0), src); 9619 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 9620 pop(dst); 9621 bind(done); 9622 } 9623 9624 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 9625 Label done; 9626 cvttss2siq(dst, src); 9627 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9628 jccb(Assembler::notEqual, done); 9629 subptr(rsp, 8); 9630 movflt(Address(rsp, 0), src); 9631 call(RuntimeAddress(CAST_FROM_FN_PTR(address, 
StubRoutines::x86::f2l_fixup()))); 9632 pop(dst); 9633 bind(done); 9634 } 9635 9636 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9637 // The following code is a line-by-line assembly translation of the rounding algorithm. 9638 // Please refer to the java.lang.Math.round(float) algorithm for details. 9639 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000; 9640 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24; 9641 const int32_t FloatConsts_EXP_BIAS = 127; 9642 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF; 9643 const int32_t MINUS_32 = 0xFFFFFFE0; 9644 Label L_special_case, L_block1, L_exit; 9645 movl(rtmp, FloatConsts_EXP_BIT_MASK); 9646 movdl(dst, src); 9647 andl(dst, rtmp); 9648 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1); 9649 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS); 9650 subl(rtmp, dst); 9651 movl(rcx, rtmp); 9652 movl(dst, MINUS_32); 9653 testl(rtmp, dst); 9654 jccb(Assembler::notEqual, L_special_case); 9655 movdl(dst, src); 9656 andl(dst, FloatConsts_SIGNIF_BIT_MASK); 9657 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1); 9658 movdl(rtmp, src); 9659 testl(rtmp, rtmp); 9660 jccb(Assembler::greaterEqual, L_block1); 9661 negl(dst); 9662 bind(L_block1); 9663 sarl(dst); 9664 addl(dst, 0x1); 9665 sarl(dst, 0x1); 9666 jmp(L_exit); 9667 bind(L_special_case); 9668 convert_f2i(dst, src); 9669 bind(L_exit); 9670 } 9671 9672 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) { 9673 // The following code is a line-by-line assembly translation of the rounding algorithm. 9674 // Please refer to the java.lang.Math.round(double) algorithm for details. 9675 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L; 9676 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53; 9677 const int64_t DoubleConsts_EXP_BIAS = 1023; 9678 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL; 9679 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L; 9680 Label L_special_case, L_block1, L_exit; 9681 mov64(rtmp, DoubleConsts_EXP_BIT_MASK); 9682 movq(dst, src); 9683 andq(dst, rtmp); 9684 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1); 9685 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS); 9686 subq(rtmp, dst); 9687 movq(rcx, rtmp); 9688 mov64(dst, MINUS_64); 9689 testq(rtmp, dst); 9690 jccb(Assembler::notEqual, L_special_case); 9691 movq(dst, src); 9692 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK); 9693 andq(dst, rtmp); 9694 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1); 9695 orq(dst, rtmp); 9696 movq(rtmp, src); 9697 testq(rtmp, rtmp); 9698 jccb(Assembler::greaterEqual, L_block1); 9699 negq(dst); 9700 bind(L_block1); 9701 sarq(dst); 9702 addq(dst, 0x1); 9703 sarq(dst, 0x1); 9704 jmp(L_exit); 9705 bind(L_special_case); 9706 convert_d2l(dst, src); 9707 bind(L_exit); 9708 } 9709 9710 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) { 9711 Label done; 9712 cvttsd2siq(dst, src); 9713 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 9714 jccb(Assembler::notEqual, done); 9715 subptr(rsp, 8); 9716 movdbl(Address(rsp, 0), src); 9717 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup()))); 9718 pop(dst); 9719 bind(done); 9720 } 9721 9722 void MacroAssembler::cache_wb(Address line) 9723 { 9724 // 64 bit cpus always support clflush 9725 assert(VM_Version::supports_clflush(), "clflush should be available"); 9726 bool optimized = VM_Version::supports_clflushopt(); 9727 bool no_evict = VM_Version::supports_clwb(); 9728 9729 // prefer clwb (writeback
without evict) otherwise 9730 // prefer clflushopt (potentially parallel writeback with evict) 9731 // otherwise fall back on clflush (serial writeback with evict) 9732 9733 if (optimized) { 9734 if (no_evict) { 9735 clwb(line); 9736 } else { 9737 clflushopt(line); 9738 } 9739 } else { 9740 // no need for fence when using CLFLUSH 9741 clflush(line); 9742 } 9743 } 9744 9745 void MacroAssembler::cache_wbsync(bool is_pre) 9746 { 9747 assert(VM_Version::supports_clflush(), "clflush should be available"); 9748 bool optimized = VM_Version::supports_clflushopt(); 9749 bool no_evict = VM_Version::supports_clwb(); 9750 9751 // pick the correct implementation 9752 9753 if (!is_pre && (optimized || no_evict)) { 9754 // need an sfence for post flush when using clflushopt or clwb 9755 // otherwise no need for any synchronization 9756 9757 sfence(); 9758 } 9759 } 9760 9761 #endif // _LP64 9762 9763 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 9764 switch (cond) { 9765 // Note some conditions are synonyms for others 9766 case Assembler::zero: return Assembler::notZero; 9767 case Assembler::notZero: return Assembler::zero; 9768 case Assembler::less: return Assembler::greaterEqual; 9769 case Assembler::lessEqual: return Assembler::greater; 9770 case Assembler::greater: return Assembler::lessEqual; 9771 case Assembler::greaterEqual: return Assembler::less; 9772 case Assembler::below: return Assembler::aboveEqual; 9773 case Assembler::belowEqual: return Assembler::above; 9774 case Assembler::above: return Assembler::belowEqual; 9775 case Assembler::aboveEqual: return Assembler::below; 9776 case Assembler::overflow: return Assembler::noOverflow; 9777 case Assembler::noOverflow: return Assembler::overflow; 9778 case Assembler::negative: return Assembler::positive; 9779 case Assembler::positive: return Assembler::negative; 9780 case Assembler::parity: return Assembler::noParity; 9781 case Assembler::noParity: return Assembler::parity; 9782 } 9783 ShouldNotReachHere(); return Assembler::overflow; 9784 } 9785 9786 SkipIfEqual::SkipIfEqual( 9787 MacroAssembler* masm, const bool* flag_addr, bool value, Register rscratch) { 9788 _masm = masm; 9789 _masm->cmp8(ExternalAddress((address)flag_addr), value, rscratch); 9790 _masm->jcc(Assembler::equal, _label); 9791 } 9792 9793 SkipIfEqual::~SkipIfEqual() { 9794 _masm->bind(_label); 9795 } 9796 9797 // 32-bit Windows has its own fast-path implementation 9798 // of get_thread 9799 #if !defined(WIN32) || defined(_LP64) 9800 9801 // This is simply a call to Thread::current() 9802 void MacroAssembler::get_thread(Register thread) { 9803 if (thread != rax) { 9804 push(rax); 9805 } 9806 LP64_ONLY(push(rdi);) 9807 LP64_ONLY(push(rsi);) 9808 push(rdx); 9809 push(rcx); 9810 #ifdef _LP64 9811 push(r8); 9812 push(r9); 9813 push(r10); 9814 push(r11); 9815 #endif 9816 9817 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 9818 9819 #ifdef _LP64 9820 pop(r11); 9821 pop(r10); 9822 pop(r9); 9823 pop(r8); 9824 #endif 9825 pop(rcx); 9826 pop(rdx); 9827 LP64_ONLY(pop(rsi);) 9828 LP64_ONLY(pop(rdi);) 9829 if (thread != rax) { 9830 mov(thread, rax); 9831 pop(rax); 9832 } 9833 } 9834 9835 9836 #endif // !WIN32 || _LP64 9837 9838 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) { 9839 Label L_stack_ok; 9840 if (bias == 0) { 9841 testptr(sp, 2 * wordSize - 1); 9842 } else { 9843 // lea(tmp, Address(rsp, bias)); 9844 mov(tmp, sp); 9845 addptr(tmp, bias); 9846 testptr(tmp, 2 *
wordSize - 1); 9847 } 9848 jcc(Assembler::equal, L_stack_ok); 9849 block_comment(msg); 9850 stop(msg); 9851 bind(L_stack_ok); 9852 } 9853 9854 // Implements fast-locking. 9855 // Branches to slow upon failure to lock the object, with ZF cleared. 9856 // Falls through upon success with unspecified ZF. 9857 // 9858 // obj: the object to be locked 9859 // hdr: the (pre-loaded) header of the object, must be rax 9860 // thread: the thread which attempts to lock obj 9861 // tmp: a temporary register 9862 void MacroAssembler::fast_lock_impl(Register obj, Register hdr, Register thread, Register tmp, Label& slow) { 9863 assert(hdr == rax, "header must be in rax for cmpxchg"); 9864 assert_different_registers(obj, hdr, thread, tmp); 9865 9866 // First we need to check if the lock-stack has room for pushing the object reference. 9867 // Note: we subtract 1 from the end-offset so that we can do a 'greater' comparison, instead 9868 // of 'greaterEqual' below, which readily clears the ZF. This makes C2 code a little simpler and 9869 // avoids one branch. 9870 cmpl(Address(thread, JavaThread::lock_stack_top_offset()), LockStack::end_offset() - 1); 9871 jcc(Assembler::greater, slow); 9872 9873 // Now we attempt to take the fast-lock. 9874 // Clear lock_mask bits (locked state). 9875 andptr(hdr, ~(int32_t)markWord::lock_mask_in_place); 9876 movptr(tmp, hdr); 9877 // Set unlocked_value bit. 9878 orptr(hdr, markWord::unlocked_value); 9879 lock(); 9880 cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9881 jcc(Assembler::notEqual, slow); 9882 9883 // If successful, push object to lock-stack. 9884 movl(tmp, Address(thread, JavaThread::lock_stack_top_offset())); 9885 movptr(Address(thread, tmp), obj); 9886 incrementl(tmp, oopSize); 9887 movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp); 9888 } 9889 9890 // Implements fast-unlocking. 9891 // Branches to slow upon failure, with ZF cleared. 9892 // Falls through upon success, with unspecified ZF. 9893 // 9894 // obj: the object to be unlocked 9895 // hdr: the (pre-loaded) header of the object, must be rax 9896 // tmp: a temporary register 9897 void MacroAssembler::fast_unlock_impl(Register obj, Register hdr, Register tmp, Label& slow) { 9898 assert(hdr == rax, "header must be in rax for cmpxchg"); 9899 assert_different_registers(obj, hdr, tmp); 9900 9901 // Mark-word must be lock_mask now, try to swing it back to unlocked_value. 9902 movptr(tmp, hdr); // The expected old value 9903 orptr(tmp, markWord::unlocked_value); 9904 lock(); 9905 cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 9906 jcc(Assembler::notEqual, slow); 9907 // Pop the lock object from the lock-stack. 9908 #ifdef _LP64 9909 const Register thread = r15_thread; 9910 #else 9911 const Register thread = rax; 9912 get_thread(thread); 9913 #endif 9914 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 9915 #ifdef ASSERT 9916 movl(tmp, Address(thread, JavaThread::lock_stack_top_offset())); 9917 movptr(Address(thread, tmp), 0); 9918 #endif 9919 }
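// Rough sketch of the lock-stack fast path implemented by fast_lock_impl/fast_unlock_impl above,
// in C-like pseudo-code. Illustrative only: cas() and the field names are shorthand, not HotSpot APIs.
//
//   fast_lock(obj, mark, thread):
//     if (thread->lock_stack_top > LockStack::end_offset() - 1) goto slow;   // lock-stack full
//     locked   = mark & ~lock_mask_in_place;                // new value: lock bits cleared
//     unlocked = locked | unlocked_value;                   // expected current value (in rax)
//     if (!cas(&obj->mark, unlocked, locked)) goto slow;
//     thread->lock_stack[thread->lock_stack_top] = obj;     // push obj onto the lock-stack
//     thread->lock_stack_top += oopSize;
//
//   fast_unlock(obj, mark, thread):
//     if (!cas(&obj->mark, mark /* locked */, mark | unlocked_value)) goto slow;
//     thread->lock_stack_top -= oopSize;                    // pop obj from the lock-stack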